xref: /linux/net/bluetooth/mgmt.c (revision 6015fb905d89063231ed33bc15be19ef0fc339b8)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes that a trusted (privileged) management socket may invoke.
 * Reported verbatim to user space by read_commands(); keep in sync with
 * the mgmt_handlers dispatch table elsewhere in this file.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Events that may be delivered to a trusted management socket; reported
 * to user space by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* Subset of opcodes an untrusted (unprivileged) socket may invoke —
 * read-only operations that leak no pairing material.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Subset of events delivered to an untrusted socket. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
209 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
210 
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so entry order is part of
 * the mapping and must never be changed. Looked up by mgmt_status();
 * out-of-range codes fall back to MGMT_STATUS_FAILED there.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
319 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
320 			    u16 len, int flag)
321 {
322 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
323 			       flag, NULL);
324 }
325 
326 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
327 			      u16 len, int flag, struct sock *skip_sk)
328 {
329 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330 			       flag, skip_sk);
331 }
332 
333 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
334 		      struct sock *skip_sk)
335 {
336 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 			       HCI_SOCK_TRUSTED, skip_sk);
338 }
339 
340 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
341 {
342 	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
343 				   skip_sk);
344 }
345 
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 		return ADDR_LE_DEV_PUBLIC;
350 	else
351 		return ADDR_LE_DEV_RANDOM;
352 }
353 
354 void mgmt_fill_version_info(void *ver)
355 {
356 	struct mgmt_rp_read_version *rp = ver;
357 
358 	rp->version = MGMT_VERSION;
359 	rp->revision = cpu_to_le16(MGMT_REVISION);
360 }
361 
362 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
363 			u16 data_len)
364 {
365 	struct mgmt_rp_read_version rp;
366 
367 	bt_dev_dbg(hdev, "sock %p", sk);
368 
369 	mgmt_fill_version_info(&rp);
370 
371 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
372 				 &rp, sizeof(rp));
373 }
374 
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 			 u16 data_len)
377 {
378 	struct mgmt_rp_read_commands *rp;
379 	u16 num_commands, num_events;
380 	size_t rp_size;
381 	int i, err;
382 
383 	bt_dev_dbg(hdev, "sock %p", sk);
384 
385 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 		num_commands = ARRAY_SIZE(mgmt_commands);
387 		num_events = ARRAY_SIZE(mgmt_events);
388 	} else {
389 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 	}
392 
393 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394 
395 	rp = kmalloc(rp_size, GFP_KERNEL);
396 	if (!rp)
397 		return -ENOMEM;
398 
399 	rp->num_commands = cpu_to_le16(num_commands);
400 	rp->num_events = cpu_to_le16(num_events);
401 
402 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_events[i], opcode);
410 	} else {
411 		__le16 *opcode = rp->opcodes;
412 
413 		for (i = 0; i < num_commands; i++, opcode++)
414 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415 
416 		for (i = 0; i < num_events; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 	}
419 
420 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421 				rp, rp_size);
422 	kfree(rp);
423 
424 	return err;
425 }
426 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers. Uses a two-pass scheme: count under
 * the list lock to size the reply, then fill it in a second pass.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reply entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocation happens with the read lock held */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, applying the stricter filters that
	 * may drop entries counted above (count can only shrink).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final entry count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
486 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * returns only primary controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reply entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocation happens with the read lock held */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes with additional exclusion filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final entry count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all controllers
 * (configured, unconfigured and AMP) including their type and bus.
 * Calling this also switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reply entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocation happens with the read lock held */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries with additional exclusion filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 configured primary, 0x01 unconfigured
		 * primary, 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
620 
621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
635 static __le32 get_missing_options(struct hci_dev *hdev)
636 {
637 	u32 options = 0;
638 
639 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
640 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
641 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
642 
643 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
644 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
645 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
646 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
647 
648 	return cpu_to_le32(options);
649 }
650 
651 static int new_options(struct hci_dev *hdev, struct sock *skip)
652 {
653 	__le32 options = get_missing_options(hdev);
654 
655 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
656 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
657 }
658 
659 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
660 {
661 	__le32 options = get_missing_options(hdev);
662 
663 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
664 				 sizeof(options));
665 }
666 
667 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
668 			    void *data, u16 data_len)
669 {
670 	struct mgmt_rp_read_config_info rp;
671 	u32 options = 0;
672 
673 	bt_dev_dbg(hdev, "sock %p", sk);
674 
675 	hci_dev_lock(hdev);
676 
677 	memset(&rp, 0, sizeof(rp));
678 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
679 
680 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
681 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
682 
683 	if (hdev->set_bdaddr)
684 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
685 
686 	rp.supported_options = cpu_to_le32(options);
687 	rp.missing_options = get_missing_options(hdev);
688 
689 	hci_dev_unlock(hdev);
690 
691 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
692 				 &rp, sizeof(rp));
693 }
694 
/* Build the MGMT_PHY_* bitmask of PHYs the controller supports, derived
 * from its BR/EDR LMP feature bits and LE PHY feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR rates: 3M requires 2M; slot counts gated separately */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the MGMT_PHY_* bitmask of currently selected PHYs. For BR/EDR
 * this is derived from hdev->pkt_type; note EDR packet-type bits are
 * inverted ("don't use" bits), hence the negated tests. For LE it is
 * derived from the default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is always selected on BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR bits in pkt_type mean "do not use this packet type",
		 * so a cleared bit means the PHY is selected.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, derived from its LMP/LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings available on every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs page-scan features from 1.2 on */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed is only offered when compiled in */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible via external-config quirk or a
	 * driver-provided set_bdaddr hook.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, mapping each relevant HCI dev flag to its mgmt setting bit.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
934 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
935 {
936 	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
937 }
938 
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941 	struct mgmt_pending_cmd *cmd;
942 
943 	/* If there's a pending mgmt command the flags will not yet have
944 	 * their final values, so check for this first.
945 	 */
946 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 	if (cmd) {
948 		struct mgmt_mode *cp = cmd->param;
949 		if (cp->val == 0x01)
950 			return LE_AD_GENERAL;
951 		else if (cp->val == 0x02)
952 			return LE_AD_LIMITED;
953 	} else {
954 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 			return LE_AD_LIMITED;
956 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 			return LE_AD_GENERAL;
958 	}
959 
960 	return 0;
961 }
962 
963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965 	struct mgmt_pending_cmd *cmd;
966 
967 	/* If there's a pending mgmt command the flag will not yet have
968 	 * it's final value, so check for this first.
969 	 */
970 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 	if (cmd) {
972 		struct mgmt_mode *cp = cmd->param;
973 
974 		return cp->val;
975 	}
976 
977 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979 
/* hci_cmd_sync work: push the current EIR data and class of device to
 * the controller after the service cache expires. Always succeeds.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
988 static void service_cache_off(struct work_struct *work)
989 {
990 	struct hci_dev *hdev = container_of(work, struct hci_dev,
991 					    service_cache.work);
992 
993 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
994 		return;
995 
996 	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
997 }
998 
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed work run when the Resolvable Private Address lifetime has
 * elapsed: flag the RPA as expired and, if advertising is active,
 * queue a sync command to regenerate and program a new one.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to reprogram unless advertising is currently on */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
/* One-time setup when a controller first comes under mgmt control:
 * initialize the delayed work items and clear the implicit bondable
 * setting. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already initialized if HCI_MGMT was previously set */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1041 
/* Handle MGMT_OP_READ_INFO: reply with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Hold hdev->lock while taking a consistent snapshot */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1071 
/* Build the EIR payload for extended controller info: class of device
 * (BR/EDR only), appearance (LE only), plus the complete and short
 * local names. Returns the number of bytes written.
 *
 * NOTE(review): no bounds checking is done here -- assumes eir points
 * at a buffer large enough for all fields (callers use 512 bytes);
 * confirm against callers before reuse.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1095 
/* Handle MGMT_OP_READ_EXT_INFO: like READ_INFO but with the class of
 * device and names encoded as variable-length EIR data. The reply
 * header and EIR payload share one 512-byte stack buffer.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1135 
/* Emit MGMT_EV_EXT_INFO_CHANGED (EIR-encoded controller info) to all
 * sockets that opted in via HCI_MGMT_EXT_INFO_EVENTS, except skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1151 
/* Send a command-complete response for opcode carrying the current
 * settings bitmask as its payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1159 
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1161 {
1162 	struct mgmt_ev_advertising_added ev;
1163 
1164 	ev.instance = instance;
1165 
1166 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1167 }
1168 
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1170 			      u8 instance)
1171 {
1172 	struct mgmt_ev_advertising_removed ev;
1173 
1174 	ev.instance = instance;
1175 
1176 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1177 }
1178 
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 {
1181 	if (hdev->adv_instance_timeout) {
1182 		hdev->adv_instance_timeout = 0;
1183 		cancel_delayed_work(&hdev->adv_instance_expire);
1184 	}
1185 }
1186 
/* This function requires the caller holds hdev->lock.
 *
 * Re-sort all LE connection parameter entries into the pending
 * connection/report action lists according to their auto_connect
 * policy, used when the controller is powered back on.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1211 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, except skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1219 
/* Completion handler for MGMT_OP_SET_POWERED: on success respond with
 * the new settings and, for power on, restart LE actions and send the
 * settings-changed event; on failure report the error status.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE auto-connect state */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1255 
1256 static int set_powered_sync(struct hci_dev *hdev, void *data)
1257 {
1258 	struct mgmt_pending_cmd *cmd = data;
1259 	struct mgmt_mode *cp = cmd->param;
1260 
1261 	BT_DBG("%s", hdev->name);
1262 
1263 	return hci_set_powered_sync(hdev, cp->val);
1264 }
1265 
/* Handle MGMT_OP_SET_POWERED: validate the request, short-circuit if
 * the power state is already as requested, otherwise queue the state
 * change via hci_cmd_sync.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already matches: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1308 
/* Public wrapper: broadcast the current settings to all subscribers. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1313 
/* Context passed through mgmt_pending_foreach() callbacks: records
 * the first responded-to socket (so a following new_settings() can
 * skip it) and carries a status code to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1319 
/* mgmt_pending_foreach() callback: respond to cmd with the current
 * settings, remember the first socket in the cmd_lookup context (with
 * a reference held) and free the pending command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1335 
/* mgmt_pending_foreach() callback: fail cmd with the status pointed
 * to by data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1343 
1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1345 {
1346 	if (cmd->cmd_complete) {
1347 		u8 *status = data;
1348 
1349 		cmd->cmd_complete(cmd, *status);
1350 		mgmt_pending_remove(cmd);
1351 
1352 		return;
1353 	}
1354 
1355 	cmd_status_rsp(cmd, data);
1356 }
1357 
/* Generic cmd_complete handler: echo the original request parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1363 
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the request parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1369 
1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1371 {
1372 	if (!lmp_bredr_capable(hdev))
1373 		return MGMT_STATUS_NOT_SUPPORTED;
1374 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 		return MGMT_STATUS_REJECTED;
1376 	else
1377 		return MGMT_STATUS_SUCCESS;
1378 }
1379 
1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1381 {
1382 	if (!lmp_le_capable(hdev))
1383 		return MGMT_STATUS_NOT_SUPPORTED;
1384 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 		return MGMT_STATUS_REJECTED;
1386 	else
1387 		return MGMT_STATUS_SUCCESS;
1388 }
1389 
/* Completion handler for MGMT_OP_SET_DISCOVERABLE: on success arm the
 * discoverable timeout (if configured) and send the settings
 * response/event; on failure report the error and drop the limited
 * discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discov_off timer only once the mode is active */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1423 
/* hci_cmd_sync callback for MGMT_OP_SET_DISCOVERABLE: push the
 * already-updated discoverable flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	/* Use bt_dev_dbg for consistency with the rest of this file */
	bt_dev_dbg(hdev, "");

	return hci_update_discoverable_sync(hdev);
}
1430 
/* Handle MGMT_OP_SET_DISCOVERABLE: val 0x00 = off, 0x01 = general
 * discoverable, 0x02 = limited discoverable (requires a timeout).
 * Validates the request, updates the flags under hdev->lock and
 * queues the HCI side of the change.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be armed on a powered controller */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1563 
1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1565 					  int err)
1566 {
1567 	struct mgmt_pending_cmd *cmd = data;
1568 
1569 	bt_dev_dbg(hdev, "err %d", err);
1570 
1571 	/* Make sure cmd still outstanding. */
1572 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1573 		return;
1574 
1575 	hci_dev_lock(hdev);
1576 
1577 	if (err) {
1578 		u8 mgmt_err = mgmt_status(err);
1579 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1580 		goto done;
1581 	}
1582 
1583 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 	new_settings(hdev, cmd->sk);
1585 
1586 done:
1587 	if (cmd)
1588 		mgmt_pending_remove(cmd);
1589 
1590 	hci_dev_unlock(hdev);
1591 }
1592 
/* Apply a connectable change without touching the controller (used
 * when it is powered off): update the flags, respond with the new
 * settings and, if anything changed, refresh scanning state and
 * broadcast the new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also disables discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1621 
/* hci_cmd_sync callback for MGMT_OP_SET_CONNECTABLE: push the
 * already-updated connectable flags to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	/* Use bt_dev_dbg for consistency with the rest of this file */
	bt_dev_dbg(hdev, "");

	return hci_update_connectable_sync(hdev);
}
1628 
/* Handle MGMT_OP_SET_CONNECTABLE: validate the request, update the
 * flags (clearing discoverable state when disabling) and queue the
 * HCI side of the change.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable tears down discoverable too */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1688 
/* Handle MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag and, if
 * the value changed, refresh discoverable state (the advertising
 * address may depend on bondable in limited privacy mode) and send
 * the new-settings event.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1726 
/* Handle MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level security
 * (authentication). When powered, this sends HCI Write Auth Enable;
 * when powered off only the flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller state already matches: just confirm settings */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Response is sent from the HCI command-complete handler */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1795 
/* Completion handler for MGMT_OP_SET_SSP: reconcile the SSP (and
 * dependent High Speed) flags with the command outcome, answer all
 * pending SET_SSP commands and refresh the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Revert the flag set optimistically before the write */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables High Speed */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1844 
/* hci_cmd_sync callback for MGMT_OP_SET_SSP: optimistically set the
 * SSP flag (when enabling) before issuing the Write SSP Mode command.
 *
 * NOTE(review): clearing HCI_SSP_ENABLED when the write *succeeded*
 * (!err && changed) looks inverted -- a revert-on-failure would test
 * "err && changed". Confirm against hci_write_ssp_mode_sync() and the
 * HCI_SSP_ENABLED handling in the command-complete path before
 * changing anything.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1862 
/* Handle MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing. When
 * powered, the change is queued via hci_cmd_sync; when powered off
 * only the flags are updated (disabling SSP also clears High Speed).
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* Disabling SSP also disables High Speed */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already matches: just confirm settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1942 
/* Handle MGMT_OP_SET_HS: toggle the High Speed (AMP) setting. This is
 * a pure flag change (no HCI traffic); it requires CONFIG_BT_HS, SSP
 * support and SSP to be enabled, and disabling is rejected while the
 * controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* SSP state is in flux; HS depends on its outcome */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2003 
/* Completion handler for MGMT_OP_SET_LE: answer all pending SET_LE
 * commands with either the error status or the new settings, then
 * broadcast the settings change.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2024 
/* hci_cmd_sync callback for MGMT_OP_SET_LE: tear down advertising
 * when disabling, write the LE host-supported setting and, when LE
 * ends up enabled, refresh advertising data and passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop any advertising before turning LE off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2066 
/* Handle MGMT_OP_SET_LE: enable/disable Low Energy support. LE-only
 * controllers may not disable LE. When powered and the host-LE state
 * actually changes, the update is queued via hci_cmd_sync; otherwise
 * only the flags are updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE drops all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or host-LE state already matches: flag-only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2158 
2159 /* This is a helper function to test for pending mgmt commands that can
2160  * cause CoD or EIR HCI commands. We can only allow one such pending
2161  * mgmt command at a time since otherwise we cannot easily track what
2162  * the current values are, will be, and based on that calculate if a new
2163  * HCI command needs to be sent and if yes with what value.
2164  */
2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2166 {
2167 	struct mgmt_pending_cmd *cmd;
2168 
2169 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 		switch (cmd->opcode) {
2171 		case MGMT_OP_ADD_UUID:
2172 		case MGMT_OP_REMOVE_UUID:
2173 		case MGMT_OP_SET_DEV_CLASS:
2174 		case MGMT_OP_SET_POWERED:
2175 			return true;
2176 		}
2177 	}
2178 
2179 	return false;
2180 }
2181 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16- and 32-bit UUIDs are shorthands for this
 * base with only the value in the last four bytes differing.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2186 
2187 static u8 get_uuid_size(const u8 *uuid)
2188 {
2189 	u32 val;
2190 
2191 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2192 		return 128;
2193 
2194 	val = get_unaligned_le32(&uuid[12]);
2195 	if (val > 0xffff)
2196 		return 32;
2197 
2198 	return 16;
2199 }
2200 
2201 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2202 {
2203 	struct mgmt_pending_cmd *cmd = data;
2204 
2205 	bt_dev_dbg(hdev, "err %d", err);
2206 
2207 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2208 			  mgmt_status(err), hdev->dev_class, 3);
2209 
2210 	mgmt_pending_free(cmd);
2211 }
2212 
/* Sync work for MGMT_OP_ADD_UUID: push the new class, then the EIR data. */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	/* Only refresh the EIR data when the class update succeeded. */
	return err ? err : hci_update_eir_sync(hdev);
}
2223 
/* MGMT_OP_ADD_UUID handler: register a service UUID and schedule the
 * resulting Class of Device and EIR updates.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize with other commands that modify CoD or EIR. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The mgmt reply is sent from mgmt_class_complete() once the HCI
	 * updates have run.
	 */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2269 
2270 static bool enable_service_cache(struct hci_dev *hdev)
2271 {
2272 	if (!hdev_is_powered(hdev))
2273 		return false;
2274 
2275 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2277 				   CACHE_TIMEOUT);
2278 		return true;
2279 	}
2280 
2281 	return false;
2282 }
2283 
/* Sync work for MGMT_OP_REMOVE_UUID: push the new class, then the EIR
 * data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	/* Only touch the EIR data when the class update went through. */
	return err ? err : hci_update_eir_sync(hdev);
}
2294 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero wildcard UUID is given) and schedule the resulting
 * Class of Device and EIR updates.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize with other commands that modify CoD or EIR. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* An all-zero UUID acts as a wildcard removing every UUID. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over, the controller update is
		 * deferred and the command can complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	/* Removing a UUID that was never added is a parameter error. */
	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The mgmt reply is sent from mgmt_class_complete() once the HCI
	 * updates have run.
	 */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2360 
2361 static int set_class_sync(struct hci_dev *hdev, void *data)
2362 {
2363 	int err = 0;
2364 
2365 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2366 		cancel_delayed_work_sync(&hdev->service_cache);
2367 		err = hci_update_eir_sync(hdev);
2368 	}
2369 
2370 	if (err)
2371 		return err;
2372 
2373 	return hci_update_class_sync(hdev);
2374 }
2375 
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class
 * and push the change to the controller when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Serialize with other commands that modify CoD or EIR. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with bits outside the valid major/minor ranges. */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off only the stored values are updated; they are
	 * written to the controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The mgmt reply is sent from mgmt_class_complete(). */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2427 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the list supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap on key_count so that expected_len below cannot exceed u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before the existing key store is touched,
	 * so a bad entry cannot leave the store half replaced.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify all sockets if the debug-keys setting flipped. */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the block list are never stored. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2516 
2517 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2518 			   u8 addr_type, struct sock *skip_sk)
2519 {
2520 	struct mgmt_ev_device_unpaired ev;
2521 
2522 	bacpy(&ev.addr.bdaddr, bdaddr);
2523 	ev.addr.type = addr_type;
2524 
2525 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2526 			  skip_sk);
2527 }
2528 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all keys for a device and
 * optionally terminate its link.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag: only 0x00 and 0x01 are valid. */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the stored parameters right away. */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* The mgmt reply is deferred until the disconnect completes. */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2656 
/* MGMT_OP_DISCONNECT handler: terminate an established BR/EDR or LE
 * link; the reply is sent once the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect command may be pending at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections are not established links. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2722 
2723 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2724 {
2725 	switch (link_type) {
2726 	case LE_LINK:
2727 		switch (addr_type) {
2728 		case ADDR_LE_DEV_PUBLIC:
2729 			return BDADDR_LE_PUBLIC;
2730 
2731 		default:
2732 			/* Fallback to LE Random address type */
2733 			return BDADDR_LE_RANDOM;
2734 		}
2735 
2736 	default:
2737 		/* Fallback to BR/EDR type */
2738 		return BDADDR_BREDR;
2739 	}
2740 }
2741 
2742 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2743 			   u16 data_len)
2744 {
2745 	struct mgmt_rp_get_connections *rp;
2746 	struct hci_conn *c;
2747 	int err;
2748 	u16 i;
2749 
2750 	bt_dev_dbg(hdev, "sock %p", sk);
2751 
2752 	hci_dev_lock(hdev);
2753 
2754 	if (!hdev_is_powered(hdev)) {
2755 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2756 				      MGMT_STATUS_NOT_POWERED);
2757 		goto unlock;
2758 	}
2759 
2760 	i = 0;
2761 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2762 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2763 			i++;
2764 	}
2765 
2766 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2767 	if (!rp) {
2768 		err = -ENOMEM;
2769 		goto unlock;
2770 	}
2771 
2772 	i = 0;
2773 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2774 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2775 			continue;
2776 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2777 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2778 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2779 			continue;
2780 		i++;
2781 	}
2782 
2783 	rp->conn_count = cpu_to_le16(i);
2784 
2785 	/* Recalculate length in case of filtered SCO connections, etc */
2786 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2787 				struct_size(rp, addr, i));
2788 
2789 	kfree(rp);
2790 
2791 unlock:
2792 	hci_dev_unlock(hdev);
2793 	return err;
2794 }
2795 
/* Queue a PIN Code Negative Reply to the controller on behalf of @sk;
 * the mgmt response is delivered later via addr_cmd_complete.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* Only the address is part of the HCI negative reply. */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2816 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; anything shorter is
	 * answered with a negative reply towards the controller instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2878 
2879 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2880 			     u16 len)
2881 {
2882 	struct mgmt_cp_set_io_capability *cp = data;
2883 
2884 	bt_dev_dbg(hdev, "sock %p", sk);
2885 
2886 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2887 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2888 				       MGMT_STATUS_INVALID_PARAMS);
2889 
2890 	hci_dev_lock(hdev);
2891 
2892 	hdev->io_capability = cp->io_capability;
2893 
2894 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2895 
2896 	hci_dev_unlock(hdev);
2897 
2898 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2899 				 NULL, 0);
2900 }
2901 
2902 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2903 {
2904 	struct hci_dev *hdev = conn->hdev;
2905 	struct mgmt_pending_cmd *cmd;
2906 
2907 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2908 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2909 			continue;
2910 
2911 		if (cmd->user_data != conn)
2912 			continue;
2913 
2914 		return cmd;
2915 	}
2916 
2917 	return NULL;
2918 }
2919 
/* Finish a Pair Device command: send the reply, detach the pairing
 * callbacks and release the connection references taken when the
 * command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken with hci_conn_get() in pair_device(). */
	hci_conn_put(conn);

	return err;
}
2948 
2949 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2950 {
2951 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2952 	struct mgmt_pending_cmd *cmd;
2953 
2954 	cmd = find_pairing(conn);
2955 	if (cmd) {
2956 		cmd->cmd_complete(cmd, status);
2957 		mgmt_pending_remove(cmd);
2958 	}
2959 }
2960 
2961 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2962 {
2963 	struct mgmt_pending_cmd *cmd;
2964 
2965 	BT_DBG("status %u", status);
2966 
2967 	cmd = find_pairing(conn);
2968 	if (!cmd) {
2969 		BT_DBG("Unable to find a pending command");
2970 		return;
2971 	}
2972 
2973 	cmd->cmd_complete(cmd, mgmt_status(status));
2974 	mgmt_pending_remove(cmd);
2975 }
2976 
/* LE connection callback: report only pairing failures to mgmt; success
 * is signalled separately through mgmt_smp_complete() since merely
 * connecting does not prove the pairing finished.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* Non-errors are handled elsewhere. */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2995 
2996 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2997 		       u16 len)
2998 {
2999 	struct mgmt_cp_pair_device *cp = data;
3000 	struct mgmt_rp_pair_device rp;
3001 	struct mgmt_pending_cmd *cmd;
3002 	u8 sec_level, auth_type;
3003 	struct hci_conn *conn;
3004 	int err;
3005 
3006 	bt_dev_dbg(hdev, "sock %p", sk);
3007 
3008 	memset(&rp, 0, sizeof(rp));
3009 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3010 	rp.addr.type = cp->addr.type;
3011 
3012 	if (!bdaddr_type_is_valid(cp->addr.type))
3013 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3014 					 MGMT_STATUS_INVALID_PARAMS,
3015 					 &rp, sizeof(rp));
3016 
3017 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3018 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3019 					 MGMT_STATUS_INVALID_PARAMS,
3020 					 &rp, sizeof(rp));
3021 
3022 	hci_dev_lock(hdev);
3023 
3024 	if (!hdev_is_powered(hdev)) {
3025 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3026 					MGMT_STATUS_NOT_POWERED, &rp,
3027 					sizeof(rp));
3028 		goto unlock;
3029 	}
3030 
3031 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3032 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3033 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3034 					sizeof(rp));
3035 		goto unlock;
3036 	}
3037 
3038 	sec_level = BT_SECURITY_MEDIUM;
3039 	auth_type = HCI_AT_DEDICATED_BONDING;
3040 
3041 	if (cp->addr.type == BDADDR_BREDR) {
3042 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3043 				       auth_type, CONN_REASON_PAIR_DEVICE);
3044 	} else {
3045 		u8 addr_type = le_addr_type(cp->addr.type);
3046 		struct hci_conn_params *p;
3047 
3048 		/* When pairing a new device, it is expected to remember
3049 		 * this device for future connections. Adding the connection
3050 		 * parameter information ahead of time allows tracking
3051 		 * of the peripheral preferred values and will speed up any
3052 		 * further connection establishment.
3053 		 *
3054 		 * If connection parameters already exist, then they
3055 		 * will be kept and this function does nothing.
3056 		 */
3057 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3058 
3059 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3060 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3061 
3062 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3063 					   sec_level, HCI_LE_CONN_TIMEOUT,
3064 					   CONN_REASON_PAIR_DEVICE);
3065 	}
3066 
3067 	if (IS_ERR(conn)) {
3068 		int status;
3069 
3070 		if (PTR_ERR(conn) == -EBUSY)
3071 			status = MGMT_STATUS_BUSY;
3072 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3073 			status = MGMT_STATUS_NOT_SUPPORTED;
3074 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3075 			status = MGMT_STATUS_REJECTED;
3076 		else
3077 			status = MGMT_STATUS_CONNECT_FAILED;
3078 
3079 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3080 					status, &rp, sizeof(rp));
3081 		goto unlock;
3082 	}
3083 
3084 	if (conn->connect_cfm_cb) {
3085 		hci_conn_drop(conn);
3086 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3087 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3088 		goto unlock;
3089 	}
3090 
3091 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3092 	if (!cmd) {
3093 		err = -ENOMEM;
3094 		hci_conn_drop(conn);
3095 		goto unlock;
3096 	}
3097 
3098 	cmd->cmd_complete = pairing_complete;
3099 
3100 	/* For LE, just connecting isn't a proof that the pairing finished */
3101 	if (cp->addr.type == BDADDR_BREDR) {
3102 		conn->connect_cfm_cb = pairing_complete_cb;
3103 		conn->security_cfm_cb = pairing_complete_cb;
3104 		conn->disconn_cfm_cb = pairing_complete_cb;
3105 	} else {
3106 		conn->connect_cfm_cb = le_pairing_complete_cb;
3107 		conn->security_cfm_cb = le_pairing_complete_cb;
3108 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3109 	}
3110 
3111 	conn->io_capability = cp->io_cap;
3112 	cmd->user_data = hci_conn_get(conn);
3113 
3114 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3115 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3116 		cmd->cmd_complete(cmd, 0);
3117 		mgmt_pending_remove(cmd);
3118 	}
3119 
3120 	err = 0;
3121 
3122 unlock:
3123 	hci_dev_unlock(hdev);
3124 	return err;
3125 }
3126 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command, remove any partial keys and tear down a link that was only
 * created for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device the pending pairing targets. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3183 
/* Common helper for user pairing responses (PIN, confirm, passkey and
 * their negative variants). LE responses are routed through SMP while
 * BR/EDR ones are sent to the controller as HCI commands.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP; no HCI
	 * command or pending mgmt command is needed.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3254 
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3266 
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation
 * (numeric comparison) request.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* NOTE(review): unlike the sibling pairing-response handlers this
	 * one validates len explicitly — presumably the command is
	 * registered as variable length; confirm against the handler
	 * table.
	 */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3282 
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user
 * confirmation (numeric comparison) request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3294 
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: answer a passkey request with
 * the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3306 
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3318 
/* If the currently active advertising instance carries any of the given
 * flags, expire it (cancel its timeout) and schedule the next instance
 * in its place. Used when data referenced by an instance (e.g. local
 * name or appearance) has changed. Always returns 0.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3341 
/* hci_cmd_sync callback: expire advertising instances that include the
 * local name after it has changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3346 
/* Completion callback for the MGMT_OP_SET_LOCAL_NAME sync work: reply
 * to the requesting socket and, on success while advertising, refresh
 * any advertising instance that carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3371 
/* Synchronous worker for MGMT_OP_SET_LOCAL_NAME: push the new name to
 * the controller (BR/EDR name + EIR) and refresh LE scan response data
 * when advertising. Always returns 0.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3387 
/* Handler for MGMT_OP_SET_LOCAL_NAME: store the new complete and short
 * local name, and if the controller is powered queue the synchronous
 * work that pushes the name to the controller.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is stored unconditionally, before any
	 * controller interaction.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered down: just store the name and notify listeners; no
	 * HCI commands are needed.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

/* Despite the label name this is also the common exit path for the
 * success cases above.
 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
3450 
/* hci_cmd_sync callback: expire advertising instances that include the
 * appearance value after it has changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3455 
/* Handler for MGMT_OP_SET_APPEARANCE: store the new GAP appearance
 * value (LE only) and refresh advertising instances that carry it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		/* Only refresh advertising when it is actually active */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3490 
/* Handler for MGMT_OP_GET_PHY_CONFIGURATION: report supported,
 * currently selected and configurable PHYs.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3511 
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets, except @skip (the originator).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3523 
/* Completion callback for the LE Set Default PHY sync work: derive the
 * final status from the HCI command response skb (stashed in cmd->skb
 * by set_default_phy_sync) and complete the mgmt command.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* If the sync work itself succeeded, the actual status comes
	 * from the command response: a missing skb, an error pointer or
	 * the first byte of the response payload.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	/* skb may be NULL or an ERR_PTR; only free a real buffer */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3560 
/* Synchronous worker for the LE part of MGMT_OP_SET_PHY_CONFIGURATION:
 * translate the mgmt PHY selection into HCI LE Set Default PHY
 * parameters and issue the command, stashing the response skb in
 * cmd->skb for set_default_phy_complete().
 */
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits 0/1 signal "no preference" for TX/RX when no
	 * explicit TX/RX PHYs were selected (per the HCI LE Set Default
	 * PHY command definition).
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
3599 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: validate the requested PHY
 * selection, apply the BR/EDR part by adjusting the ACL packet types,
 * and queue synchronous work for the LE part (LE Set Default PHY).
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any PHY the controller does not support */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that are supported but not configurable must always
	 * remain selected.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is unchanged */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto ACL packet types. Note that
	 * the EDR bits are "do not use" bits in pkt_type, hence the
	 * inverted logic compared to the basic-rate slot bits.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, the BR/EDR update above is
	 * all that was needed; complete immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3728 
3729 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3730 			    u16 len)
3731 {
3732 	int err = MGMT_STATUS_SUCCESS;
3733 	struct mgmt_cp_set_blocked_keys *keys = data;
3734 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3735 				   sizeof(struct mgmt_blocked_key_info));
3736 	u16 key_count, expected_len;
3737 	int i;
3738 
3739 	bt_dev_dbg(hdev, "sock %p", sk);
3740 
3741 	key_count = __le16_to_cpu(keys->key_count);
3742 	if (key_count > max_key_count) {
3743 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3744 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3745 				       MGMT_STATUS_INVALID_PARAMS);
3746 	}
3747 
3748 	expected_len = struct_size(keys, keys, key_count);
3749 	if (expected_len != len) {
3750 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3751 			   expected_len, len);
3752 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3753 				       MGMT_STATUS_INVALID_PARAMS);
3754 	}
3755 
3756 	hci_dev_lock(hdev);
3757 
3758 	hci_blocked_keys_clear(hdev);
3759 
3760 	for (i = 0; i < keys->key_count; ++i) {
3761 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3762 
3763 		if (!b) {
3764 			err = MGMT_STATUS_NO_RESOURCES;
3765 			break;
3766 		}
3767 
3768 		b->type = keys->keys[i].type;
3769 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3770 		list_add_rcu(&b->list, &hdev->blocked_keys);
3771 	}
3772 	hci_dev_unlock(hdev);
3773 
3774 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3775 				err, NULL, 0);
3776 }
3777 
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * setting. The flag can only be changed while the controller is
 * powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is supported */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject changing the value while powered on */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Notify other mgmt sockets only on an actual change */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3826 
/* Handler for MGMT_OP_READ_CONTROLLER_CAP: build an EIR-style list of
 * controller capability entries (security flags, max encryption key
 * sizes and LE TX power range) and return it.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* NOTE(review): copies a single byte of each field —
		 * assumes the tx power values fit in one signed byte;
		 * confirm against the hci_dev field types.
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3893 
/* UUIDs identifying the experimental features exposed via
 * MGMT_OP_READ/SET_EXP_FEATURE. The byte arrays below hold the UUID
 * shown in the preceding comment in reversed (little-endian) byte
 * order, as carried on the mgmt wire.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3925 
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features available for this controller (or the non-controller debug
 * feature when hdev is NULL) together with their current flags.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	/* The debug feature is only listed on the index-less command
	 * (hdev == NULL).
	 */
#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) marks the feature as changing supported
		 * settings; BIT(0) reflects the enabled state.
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4004 
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature and update the DEVICE_PRIVACY connection flag
 * accordingly. Only sockets that enabled exp-feature events (except
 * @skip) receive it.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	/* BIT(1) marks the feature as changing supported settings */
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	if (enabled && privacy_mode_capable(hdev))
		set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
	else
		clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4024 
/* Emit a generic Experimental Feature Changed event for @uuid to all
 * sockets that enabled exp-feature events, except @skip.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4038 
/* Declare one entry of the experimental features table: the feature's
 * UUID together with its setter function.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4044 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Setting the all-zero UUID disables the debug feature (index-less
 * command) and/or LL privacy (controller command, only while powered
 * off), emitting change events as needed.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be toggled while powered down */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4081 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental feature setter for the debug UUID: toggle the global
 * Bluetooth debug state. Only valid on the index-less (non-controller)
 * command path.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other listeners only on an actual state change */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4128 
/* Experimental feature setter for the RPA resolution (LL privacy)
 * UUID: toggle HCI_ENABLE_LL_PRIVACY. Requires a controller index and
 * a powered-down controller.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4193 
/* Experimental feature setter for the quality report UUID: toggle the
 * HCI_QUALITY_REPORT flag via the driver hook or the AOSP vendor
 * extension. Runs under the request sync lock since it may talk to the
 * controller.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* A driver-provided hook takes precedence over the AOSP
		 * vendor command.
		 */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4267 
4268 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4269 				  struct mgmt_cp_set_exp_feature *cp,
4270 				  u16 data_len)
4271 {
4272 	bool val, changed;
4273 	int err;
4274 	struct mgmt_rp_set_exp_feature rp;
4275 
4276 	/* Command requires to use a valid controller index */
4277 	if (!hdev)
4278 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4279 				       MGMT_OP_SET_EXP_FEATURE,
4280 				       MGMT_STATUS_INVALID_INDEX);
4281 
4282 	/* Parameters are limited to a single octet */
4283 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4284 		return mgmt_cmd_status(sk, hdev->id,
4285 				       MGMT_OP_SET_EXP_FEATURE,
4286 				       MGMT_STATUS_INVALID_PARAMS);
4287 
4288 	/* Only boolean on/off is supported */
4289 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4290 		return mgmt_cmd_status(sk, hdev->id,
4291 				       MGMT_OP_SET_EXP_FEATURE,
4292 				       MGMT_STATUS_INVALID_PARAMS);
4293 
4294 	val = !!cp->param[0];
4295 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4296 
4297 	if (!hdev->get_data_path_id) {
4298 		return mgmt_cmd_status(sk, hdev->id,
4299 				       MGMT_OP_SET_EXP_FEATURE,
4300 				       MGMT_STATUS_NOT_SUPPORTED);
4301 	}
4302 
4303 	if (changed) {
4304 		if (val)
4305 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4306 		else
4307 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4308 	}
4309 
4310 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4311 		    val, changed);
4312 
4313 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4314 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4315 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4316 	err = mgmt_cmd_complete(sk, hdev->id,
4317 				MGMT_OP_SET_EXP_FEATURE, 0,
4318 				&rp, sizeof(rp));
4319 
4320 	if (changed)
4321 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4322 
4323 	return err;
4324 }
4325 
4326 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4327 					  struct mgmt_cp_set_exp_feature *cp,
4328 					  u16 data_len)
4329 {
4330 	bool val, changed;
4331 	int err;
4332 	struct mgmt_rp_set_exp_feature rp;
4333 
4334 	/* Command requires to use a valid controller index */
4335 	if (!hdev)
4336 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4337 				       MGMT_OP_SET_EXP_FEATURE,
4338 				       MGMT_STATUS_INVALID_INDEX);
4339 
4340 	/* Parameters are limited to a single octet */
4341 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4342 		return mgmt_cmd_status(sk, hdev->id,
4343 				       MGMT_OP_SET_EXP_FEATURE,
4344 				       MGMT_STATUS_INVALID_PARAMS);
4345 
4346 	/* Only boolean on/off is supported */
4347 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4348 		return mgmt_cmd_status(sk, hdev->id,
4349 				       MGMT_OP_SET_EXP_FEATURE,
4350 				       MGMT_STATUS_INVALID_PARAMS);
4351 
4352 	val = !!cp->param[0];
4353 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4354 
4355 	if (!hci_dev_le_state_simultaneous(hdev)) {
4356 		return mgmt_cmd_status(sk, hdev->id,
4357 				       MGMT_OP_SET_EXP_FEATURE,
4358 				       MGMT_STATUS_NOT_SUPPORTED);
4359 	}
4360 
4361 	if (changed) {
4362 		if (val)
4363 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4364 		else
4365 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4366 	}
4367 
4368 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4369 		    val, changed);
4370 
4371 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4372 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4373 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4374 	err = mgmt_cmd_complete(sk, hdev->id,
4375 				MGMT_OP_SET_EXP_FEATURE, 0,
4376 				&rp, sizeof(rp));
4377 
4378 	if (changed)
4379 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4380 
4381 	return err;
4382 }
4383 
/* Dispatch table mapping experimental feature UUIDs to their handlers.
 * set_exp_feature() walks this table in order; it is terminated by a
 * NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4401 
4402 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4403 			   void *data, u16 data_len)
4404 {
4405 	struct mgmt_cp_set_exp_feature *cp = data;
4406 	size_t i = 0;
4407 
4408 	bt_dev_dbg(hdev, "sock %p", sk);
4409 
4410 	for (i = 0; exp_features[i].uuid; i++) {
4411 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4412 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4413 	}
4414 
4415 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4416 			       MGMT_OP_SET_EXP_FEATURE,
4417 			       MGMT_STATUS_NOT_SUPPORTED);
4418 }
4419 
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and currently set
 * per-device connection flags for a single device.
 *
 * The device is looked up in the BR/EDR accept list or the LE connection
 * parameter list depending on the address type.  Replies with
 * MGMT_STATUS_INVALID_PARAMS (and an all-zero payload) if the device is
 * not known.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	/* Flags supported by this controller, flattened to a 32-bit mask */
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		bitmap_to_arr32(&current_flags, br_params->flags,
				__HCI_CONN_NUM_FLAGS);
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		bitmap_to_arr32(&current_flags, params->flags,
				__HCI_CONN_NUM_FLAGS);
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	/* Full-size reply is sent even on failure (payload zeroed above) */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
4474 
4475 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4476 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4477 				 u32 supported_flags, u32 current_flags)
4478 {
4479 	struct mgmt_ev_device_flags_changed ev;
4480 
4481 	bacpy(&ev.addr.bdaddr, bdaddr);
4482 	ev.addr.type = bdaddr_type;
4483 	ev.supported_flags = cpu_to_le32(supported_flags);
4484 	ev.current_flags = cpu_to_le32(current_flags);
4485 
4486 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4487 }
4488 
4489 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4490 			    u16 len)
4491 {
4492 	struct mgmt_cp_set_device_flags *cp = data;
4493 	struct bdaddr_list_with_flags *br_params;
4494 	struct hci_conn_params *params;
4495 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4496 	u32 supported_flags;
4497 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4498 
4499 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4500 		   &cp->addr.bdaddr, cp->addr.type,
4501 		   __le32_to_cpu(current_flags));
4502 
4503 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4504 			__HCI_CONN_NUM_FLAGS);
4505 
4506 	if ((supported_flags | current_flags) != supported_flags) {
4507 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4508 			    current_flags, supported_flags);
4509 		goto done;
4510 	}
4511 
4512 	hci_dev_lock(hdev);
4513 
4514 	if (cp->addr.type == BDADDR_BREDR) {
4515 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4516 							      &cp->addr.bdaddr,
4517 							      cp->addr.type);
4518 
4519 		if (br_params) {
4520 			bitmap_from_u64(br_params->flags, current_flags);
4521 			status = MGMT_STATUS_SUCCESS;
4522 		} else {
4523 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4524 				    &cp->addr.bdaddr, cp->addr.type);
4525 		}
4526 	} else {
4527 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4528 						le_addr_type(cp->addr.type));
4529 		if (params) {
4530 			bitmap_from_u64(params->flags, current_flags);
4531 			status = MGMT_STATUS_SUCCESS;
4532 
4533 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4534 			 * has been set.
4535 			 */
4536 			if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4537 				     params->flags))
4538 				hci_update_passive_scan(hdev);
4539 		} else {
4540 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4541 				    &cp->addr.bdaddr,
4542 				    le_addr_type(cp->addr.type));
4543 		}
4544 	}
4545 
4546 done:
4547 	hci_dev_unlock(hdev);
4548 
4549 	if (status == MGMT_STATUS_SUCCESS)
4550 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4551 				     supported_flags, current_flags);
4552 
4553 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4554 				 &cp->addr, sizeof(cp->addr));
4555 }
4556 
4557 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4558 				   u16 handle)
4559 {
4560 	struct mgmt_ev_adv_monitor_added ev;
4561 
4562 	ev.monitor_handle = cpu_to_le16(handle);
4563 
4564 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4565 }
4566 
4567 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4568 {
4569 	struct mgmt_ev_adv_monitor_removed ev;
4570 	struct mgmt_pending_cmd *cmd;
4571 	struct sock *sk_skip = NULL;
4572 	struct mgmt_cp_remove_adv_monitor *cp;
4573 
4574 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4575 	if (cmd) {
4576 		cp = cmd->param;
4577 
4578 		if (cp->monitor_handle)
4579 			sk_skip = cmd->sk;
4580 	}
4581 
4582 	ev.monitor_handle = cpu_to_le16(handle);
4583 
4584 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4585 }
4586 
4587 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4588 				 void *data, u16 len)
4589 {
4590 	struct adv_monitor *monitor = NULL;
4591 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4592 	int handle, err;
4593 	size_t rp_size = 0;
4594 	__u32 supported = 0;
4595 	__u32 enabled = 0;
4596 	__u16 num_handles = 0;
4597 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4598 
4599 	BT_DBG("request for %s", hdev->name);
4600 
4601 	hci_dev_lock(hdev);
4602 
4603 	if (msft_monitor_supported(hdev))
4604 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4605 
4606 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4607 		handles[num_handles++] = monitor->handle;
4608 
4609 	hci_dev_unlock(hdev);
4610 
4611 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4612 	rp = kmalloc(rp_size, GFP_KERNEL);
4613 	if (!rp)
4614 		return -ENOMEM;
4615 
4616 	/* All supported features are currently enabled */
4617 	enabled = supported;
4618 
4619 	rp->supported_features = cpu_to_le32(supported);
4620 	rp->enabled_features = cpu_to_le32(enabled);
4621 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4622 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4623 	rp->num_handles = cpu_to_le16(num_handles);
4624 	if (num_handles)
4625 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4626 
4627 	err = mgmt_cmd_complete(sk, hdev->id,
4628 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
4629 				MGMT_STATUS_SUCCESS, rp, rp_size);
4630 
4631 	kfree(rp);
4632 
4633 	return err;
4634 }
4635 
4636 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4637 {
4638 	struct mgmt_rp_add_adv_patterns_monitor rp;
4639 	struct mgmt_pending_cmd *cmd;
4640 	struct adv_monitor *monitor;
4641 	int err = 0;
4642 
4643 	hci_dev_lock(hdev);
4644 
4645 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4646 	if (!cmd) {
4647 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4648 		if (!cmd)
4649 			goto done;
4650 	}
4651 
4652 	monitor = cmd->user_data;
4653 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4654 
4655 	if (!status) {
4656 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4657 		hdev->adv_monitors_cnt++;
4658 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4659 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4660 		hci_update_passive_scan(hdev);
4661 	}
4662 
4663 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4664 				mgmt_status(status), &rp, sizeof(rp));
4665 	mgmt_pending_remove(cmd);
4666 
4667 done:
4668 	hci_dev_unlock(hdev);
4669 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4670 		   rp.monitor_handle, status);
4671 
4672 	return err;
4673 }
4674 
/* Common tail of the ADD_ADV_PATTERNS_MONITOR(_RSSI) commands.
 *
 * @m is the monitor allocated by the caller (may be NULL if allocation
 * failed) and @status any error the caller already detected; ownership
 * of @m transfers here, and it is freed on every failure path.
 *
 * Returns the command reply result, or 0 when the registration stays
 * pending until mgmt_add_adv_patterns_monitor_complete() fires.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* Caller already failed (parse/alloc); just clean up and report */
	if (status)
		goto unlock;

	/* Only one monitor/LE-state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		/* Map the internal errno to a mgmt status code */
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	/* Monitor was registered without needing the controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* NOTE(review): m may be NULL here (caller's alloc failure path);
	 * assumes hci_free_adv_monitor() tolerates NULL — confirm.
	 */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4738 
4739 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4740 				   struct mgmt_adv_rssi_thresholds *rssi)
4741 {
4742 	if (rssi) {
4743 		m->rssi.low_threshold = rssi->low_threshold;
4744 		m->rssi.low_threshold_timeout =
4745 		    __le16_to_cpu(rssi->low_threshold_timeout);
4746 		m->rssi.high_threshold = rssi->high_threshold;
4747 		m->rssi.high_threshold_timeout =
4748 		    __le16_to_cpu(rssi->high_threshold_timeout);
4749 		m->rssi.sampling_period = rssi->sampling_period;
4750 	} else {
4751 		/* Default values. These numbers are the least constricting
4752 		 * parameters for MSFT API to work, so it behaves as if there
4753 		 * are no rssi parameter to consider. May need to be changed
4754 		 * if other API are to be supported.
4755 		 */
4756 		m->rssi.low_threshold = -127;
4757 		m->rssi.low_threshold_timeout = 60;
4758 		m->rssi.high_threshold = -127;
4759 		m->rssi.high_threshold_timeout = 0;
4760 		m->rssi.sampling_period = 0;
4761 	}
4762 }
4763 
4764 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4765 				    struct mgmt_adv_pattern *patterns)
4766 {
4767 	u8 offset = 0, length = 0;
4768 	struct adv_pattern *p = NULL;
4769 	int i;
4770 
4771 	for (i = 0; i < pattern_count; i++) {
4772 		offset = patterns[i].offset;
4773 		length = patterns[i].length;
4774 		if (offset >= HCI_MAX_AD_LENGTH ||
4775 		    length > HCI_MAX_AD_LENGTH ||
4776 		    (offset + length) > HCI_MAX_AD_LENGTH)
4777 			return MGMT_STATUS_INVALID_PARAMS;
4778 
4779 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4780 		if (!p)
4781 			return MGMT_STATUS_NO_RESOURCES;
4782 
4783 		p->ad_type = patterns[i].ad_type;
4784 		p->offset = patterns[i].offset;
4785 		p->length = patterns[i].length;
4786 		memcpy(p->value, patterns[i].value, p->length);
4787 
4788 		INIT_LIST_HEAD(&p->list);
4789 		list_add(&p->list, &m->patterns);
4790 	}
4791 
4792 	return MGMT_STATUS_SUCCESS;
4793 }
4794 
4795 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4796 				    void *data, u16 len)
4797 {
4798 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4799 	struct adv_monitor *m = NULL;
4800 	u8 status = MGMT_STATUS_SUCCESS;
4801 	size_t expected_size = sizeof(*cp);
4802 
4803 	BT_DBG("request for %s", hdev->name);
4804 
4805 	if (len <= sizeof(*cp)) {
4806 		status = MGMT_STATUS_INVALID_PARAMS;
4807 		goto done;
4808 	}
4809 
4810 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4811 	if (len != expected_size) {
4812 		status = MGMT_STATUS_INVALID_PARAMS;
4813 		goto done;
4814 	}
4815 
4816 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4817 	if (!m) {
4818 		status = MGMT_STATUS_NO_RESOURCES;
4819 		goto done;
4820 	}
4821 
4822 	INIT_LIST_HEAD(&m->patterns);
4823 
4824 	parse_adv_monitor_rssi(m, NULL);
4825 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4826 
4827 done:
4828 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4829 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4830 }
4831 
4832 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4833 					 void *data, u16 len)
4834 {
4835 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4836 	struct adv_monitor *m = NULL;
4837 	u8 status = MGMT_STATUS_SUCCESS;
4838 	size_t expected_size = sizeof(*cp);
4839 
4840 	BT_DBG("request for %s", hdev->name);
4841 
4842 	if (len <= sizeof(*cp)) {
4843 		status = MGMT_STATUS_INVALID_PARAMS;
4844 		goto done;
4845 	}
4846 
4847 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4848 	if (len != expected_size) {
4849 		status = MGMT_STATUS_INVALID_PARAMS;
4850 		goto done;
4851 	}
4852 
4853 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4854 	if (!m) {
4855 		status = MGMT_STATUS_NO_RESOURCES;
4856 		goto done;
4857 	}
4858 
4859 	INIT_LIST_HEAD(&m->patterns);
4860 
4861 	parse_adv_monitor_rssi(m, &cp->rssi);
4862 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4863 
4864 done:
4865 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4866 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4867 }
4868 
4869 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4870 {
4871 	struct mgmt_rp_remove_adv_monitor rp;
4872 	struct mgmt_cp_remove_adv_monitor *cp;
4873 	struct mgmt_pending_cmd *cmd;
4874 	int err = 0;
4875 
4876 	hci_dev_lock(hdev);
4877 
4878 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4879 	if (!cmd)
4880 		goto done;
4881 
4882 	cp = cmd->param;
4883 	rp.monitor_handle = cp->monitor_handle;
4884 
4885 	if (!status)
4886 		hci_update_passive_scan(hdev);
4887 
4888 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4889 				mgmt_status(status), &rp, sizeof(rp));
4890 	mgmt_pending_remove(cmd);
4891 
4892 done:
4893 	hci_dev_unlock(hdev);
4894 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4895 		   rp.monitor_handle, status);
4896 
4897 	return err;
4898 }
4899 
/* MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (handle != 0) or all
 * monitors (handle == 0).
 *
 * Returns the command reply result when the removal completed locally,
 * or 0 when it stays pending until mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor/LE-state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 is the wildcard: remove every registered monitor */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* NOTE(review): -ENOENT (unknown handle) is mapped to
		 * INVALID_INDEX rather than INVALID_PARAMS — confirm this
		 * is the intended mgmt semantics.
		 */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4964 
/* Completion handler for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Translates the controller reply (legacy or Secure Connections extended
 * variant, selected by bredr_sc_enabled()) into the mgmt response.  When
 * Secure Connections is disabled, the 256-bit hash/randomizer fields are
 * trimmed from the reply size.  Always frees the pending command and the
 * reply skb.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Request itself succeeded; derive status from the HCI reply.
	 * cmd->skb may be NULL, an ERR_PTR, or a real skb whose first
	 * byte is the HCI status.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Drop the unused 256-bit fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5031 
5032 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5033 {
5034 	struct mgmt_pending_cmd *cmd = data;
5035 
5036 	if (bredr_sc_enabled(hdev))
5037 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5038 	else
5039 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5040 
5041 	if (IS_ERR(cmd->skb))
5042 		return PTR_ERR(cmd->skb);
5043 	else
5044 		return 0;
5045 }
5046 
5047 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5048 			       void *data, u16 data_len)
5049 {
5050 	struct mgmt_pending_cmd *cmd;
5051 	int err;
5052 
5053 	bt_dev_dbg(hdev, "sock %p", sk);
5054 
5055 	hci_dev_lock(hdev);
5056 
5057 	if (!hdev_is_powered(hdev)) {
5058 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5059 				      MGMT_STATUS_NOT_POWERED);
5060 		goto unlock;
5061 	}
5062 
5063 	if (!lmp_ssp_capable(hdev)) {
5064 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5065 				      MGMT_STATUS_NOT_SUPPORTED);
5066 		goto unlock;
5067 	}
5068 
5069 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5070 	if (!cmd)
5071 		err = -ENOMEM;
5072 	else
5073 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5074 					 read_local_oob_data_complete);
5075 
5076 	if (err < 0) {
5077 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5078 				      MGMT_STATUS_FAILED);
5079 
5080 		if (cmd)
5081 			mgmt_pending_free(cmd);
5082 	}
5083 
5084 unlock:
5085 	hci_dev_unlock(hdev);
5086 	return err;
5087 }
5088 
/* MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data received
 * for a remote device.
 *
 * Two payload sizes are accepted: the legacy form with P-192 hash and
 * randomizer only (BR/EDR addresses only), and the extended form that
 * additionally carries P-256 values.  Zero-valued hash/randomizer pairs
 * disable OOB data for that curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 plus P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5196 
5197 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5198 				  void *data, u16 len)
5199 {
5200 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5201 	u8 status;
5202 	int err;
5203 
5204 	bt_dev_dbg(hdev, "sock %p", sk);
5205 
5206 	if (cp->addr.type != BDADDR_BREDR)
5207 		return mgmt_cmd_complete(sk, hdev->id,
5208 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5209 					 MGMT_STATUS_INVALID_PARAMS,
5210 					 &cp->addr, sizeof(cp->addr));
5211 
5212 	hci_dev_lock(hdev);
5213 
5214 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5215 		hci_remote_oob_data_clear(hdev);
5216 		status = MGMT_STATUS_SUCCESS;
5217 		goto done;
5218 	}
5219 
5220 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5221 	if (err < 0)
5222 		status = MGMT_STATUS_INVALID_PARAMS;
5223 	else
5224 		status = MGMT_STATUS_SUCCESS;
5225 
5226 done:
5227 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5228 				status, &cp->addr, sizeof(cp->addr));
5229 
5230 	hci_dev_unlock(hdev);
5231 	return err;
5232 }
5233 
5234 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5235 {
5236 	struct mgmt_pending_cmd *cmd;
5237 
5238 	bt_dev_dbg(hdev, "status %u", status);
5239 
5240 	hci_dev_lock(hdev);
5241 
5242 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5243 	if (!cmd)
5244 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5245 
5246 	if (!cmd)
5247 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5248 
5249 	if (cmd) {
5250 		cmd->cmd_complete(cmd, mgmt_status(status));
5251 		mgmt_pending_remove(cmd);
5252 	}
5253 
5254 	hci_dev_unlock(hdev);
5255 }
5256 
5257 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5258 				    uint8_t *mgmt_status)
5259 {
5260 	switch (type) {
5261 	case DISCOV_TYPE_LE:
5262 		*mgmt_status = mgmt_le_support(hdev);
5263 		if (*mgmt_status)
5264 			return false;
5265 		break;
5266 	case DISCOV_TYPE_INTERLEAVED:
5267 		*mgmt_status = mgmt_le_support(hdev);
5268 		if (*mgmt_status)
5269 			return false;
5270 		fallthrough;
5271 	case DISCOV_TYPE_BREDR:
5272 		*mgmt_status = mgmt_bredr_support(hdev);
5273 		if (*mgmt_status)
5274 			return false;
5275 		break;
5276 	default:
5277 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5278 		return false;
5279 	}
5280 
5281 	return true;
5282 }
5283 
5284 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5285 {
5286 	struct mgmt_pending_cmd *cmd = data;
5287 
5288 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5289 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5290 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5291 		return;
5292 
5293 	bt_dev_dbg(hdev, "err %d", err);
5294 
5295 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5296 			  cmd->param, 1);
5297 	mgmt_pending_remove(cmd);
5298 
5299 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5300 				DISCOVERY_FINDING);
5301 }
5302 
/* hci_cmd_sync callback: kick off the actual discovery procedure.
 * @data carries the pending mgmt command and is unused here.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5307 
/* Common implementation of the START_DISCOVERY, START_LIMITED_DISCOVERY
 * and START_SERVICE_DISCOVERY commands (@op selects which).
 *
 * Validates controller power state, discovery state, and discovery type,
 * then queues the asynchronous discovery start; the command is replied
 * to from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session may run at a time, and not while
	 * periodic inquiry is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5378 
/* MGMT_OP_START_DISCOVERY handler */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5385 
/* MGMT_OP_START_LIMITED_DISCOVERY handler */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5393 
/* MGMT_OP_START_SERVICE_DISCOVERY handler. Like start_discovery but with
 * result filtering by RSSI threshold and a variable-length list of
 * 128-bit service UUIDs appended to the command parameters.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap so that sizeof(*cp) + uuid_count * 16 cannot exceed U16_MAX */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or periodic inquiry is active */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match exactly: fixed header plus one
	 * 16-byte UUID per claimed entry.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list; freed via
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Response is deferred to start_discovery_complete() */
	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5505 
5506 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5507 {
5508 	struct mgmt_pending_cmd *cmd;
5509 
5510 	bt_dev_dbg(hdev, "status %u", status);
5511 
5512 	hci_dev_lock(hdev);
5513 
5514 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5515 	if (cmd) {
5516 		cmd->cmd_complete(cmd, mgmt_status(status));
5517 		mgmt_pending_remove(cmd);
5518 	}
5519 
5520 	hci_dev_unlock(hdev);
5521 }
5522 
5523 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5524 {
5525 	struct mgmt_pending_cmd *cmd = data;
5526 
5527 	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5528 		return;
5529 
5530 	bt_dev_dbg(hdev, "err %d", err);
5531 
5532 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5533 			  cmd->param, 1);
5534 	mgmt_pending_remove(cmd);
5535 
5536 	if (!err)
5537 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5538 }
5539 
/* hci_cmd_sync_queue() work callback: stop the ongoing discovery
 * procedure on the controller.
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5544 
/* MGMT_OP_STOP_DISCOVERY handler. The supplied type must match the type
 * of the currently running discovery; the final response is sent from
 * stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the active discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5589 
/* MGMT_OP_CONFIRM_NAME handler. Userspace tells us whether the name of a
 * discovered device is already known, so the inquiry cache can decide
 * whether a remote name request is still needed.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only meaningful while a discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The device must be in the cache with an unknown name state */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* No resolution needed; drop from the resolve list */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5631 
5632 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5633 			u16 len)
5634 {
5635 	struct mgmt_cp_block_device *cp = data;
5636 	u8 status;
5637 	int err;
5638 
5639 	bt_dev_dbg(hdev, "sock %p", sk);
5640 
5641 	if (!bdaddr_type_is_valid(cp->addr.type))
5642 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5643 					 MGMT_STATUS_INVALID_PARAMS,
5644 					 &cp->addr, sizeof(cp->addr));
5645 
5646 	hci_dev_lock(hdev);
5647 
5648 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5649 				  cp->addr.type);
5650 	if (err < 0) {
5651 		status = MGMT_STATUS_FAILED;
5652 		goto done;
5653 	}
5654 
5655 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5656 		   sk);
5657 	status = MGMT_STATUS_SUCCESS;
5658 
5659 done:
5660 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5661 				&cp->addr, sizeof(cp->addr));
5662 
5663 	hci_dev_unlock(hdev);
5664 
5665 	return err;
5666 }
5667 
5668 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5669 			  u16 len)
5670 {
5671 	struct mgmt_cp_unblock_device *cp = data;
5672 	u8 status;
5673 	int err;
5674 
5675 	bt_dev_dbg(hdev, "sock %p", sk);
5676 
5677 	if (!bdaddr_type_is_valid(cp->addr.type))
5678 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5679 					 MGMT_STATUS_INVALID_PARAMS,
5680 					 &cp->addr, sizeof(cp->addr));
5681 
5682 	hci_dev_lock(hdev);
5683 
5684 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5685 				  cp->addr.type);
5686 	if (err < 0) {
5687 		status = MGMT_STATUS_INVALID_PARAMS;
5688 		goto done;
5689 	}
5690 
5691 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5692 		   sk);
5693 	status = MGMT_STATUS_SUCCESS;
5694 
5695 done:
5696 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5697 				&cp->addr, sizeof(cp->addr));
5698 
5699 	hci_dev_unlock(hdev);
5700 
5701 	return err;
5702 }
5703 
/* hci_cmd_sync_queue() work callback: refresh the EIR data so it
 * includes the newly configured Device ID.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5708 
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) and schedule an EIR update.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Valid sources: 0x0000 disabled, 0x0001 Bluetooth SIG,
	 * 0x0002 USB Implementer's Forum.
	 */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best-effort EIR refresh; command already succeeded above */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5740 
/* Log the outcome of re-enabling instance advertising */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err)
		bt_dev_dbg(hdev, "status %d", err);
	else
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5748 
/* Completion callback for Set Advertising. Synchronizes the
 * HCI_ADVERTISING flag with the actual controller state, notifies
 * userspace and, when advertising was just disabled, re-enables any
 * previously configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail all pending Set Advertising commands */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state into the
	 * management-level flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance; fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5796 
/* hci_cmd_sync_queue() work callback for Set Advertising: apply the
 * connectable flag and enable or disable advertising on the controller.
 * cp->val: 0x00 off, 0x01 on, 0x02 on and connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending instance advertising timeout */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			/* Legacy advertising: update data before enabling */
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5830 
/* MGMT_OP_SET_ADVERTISING handler. val: 0x00 off, 0x01 on,
 * 0x02 on and connectable. When no HCI traffic is needed (powered off,
 * no state change, LE connections active, or active scanning) the flags
 * are toggled directly and a response is sent immediately; otherwise the
 * change is queued and completed in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Don't change advertising while it is temporarily suspended */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast only if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another Set Advertising or Set LE is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5914 
5915 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5916 			      void *data, u16 len)
5917 {
5918 	struct mgmt_cp_set_static_address *cp = data;
5919 	int err;
5920 
5921 	bt_dev_dbg(hdev, "sock %p", sk);
5922 
5923 	if (!lmp_le_capable(hdev))
5924 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5925 				       MGMT_STATUS_NOT_SUPPORTED);
5926 
5927 	if (hdev_is_powered(hdev))
5928 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5929 				       MGMT_STATUS_REJECTED);
5930 
5931 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5932 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5933 			return mgmt_cmd_status(sk, hdev->id,
5934 					       MGMT_OP_SET_STATIC_ADDRESS,
5935 					       MGMT_STATUS_INVALID_PARAMS);
5936 
5937 		/* Two most significant bits shall be set */
5938 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5939 			return mgmt_cmd_status(sk, hdev->id,
5940 					       MGMT_OP_SET_STATIC_ADDRESS,
5941 					       MGMT_STATUS_INVALID_PARAMS);
5942 	}
5943 
5944 	hci_dev_lock(hdev);
5945 
5946 	bacpy(&hdev->static_addr, &cp->bdaddr);
5947 
5948 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5949 	if (err < 0)
5950 		goto unlock;
5951 
5952 	err = new_settings(hdev, sk);
5953 
5954 unlock:
5955 	hci_dev_unlock(hdev);
5956 	return err;
5957 }
5958 
5959 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5960 			   void *data, u16 len)
5961 {
5962 	struct mgmt_cp_set_scan_params *cp = data;
5963 	__u16 interval, window;
5964 	int err;
5965 
5966 	bt_dev_dbg(hdev, "sock %p", sk);
5967 
5968 	if (!lmp_le_capable(hdev))
5969 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5970 				       MGMT_STATUS_NOT_SUPPORTED);
5971 
5972 	interval = __le16_to_cpu(cp->interval);
5973 
5974 	if (interval < 0x0004 || interval > 0x4000)
5975 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5976 				       MGMT_STATUS_INVALID_PARAMS);
5977 
5978 	window = __le16_to_cpu(cp->window);
5979 
5980 	if (window < 0x0004 || window > 0x4000)
5981 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5982 				       MGMT_STATUS_INVALID_PARAMS);
5983 
5984 	if (window > interval)
5985 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5986 				       MGMT_STATUS_INVALID_PARAMS);
5987 
5988 	hci_dev_lock(hdev);
5989 
5990 	hdev->le_scan_interval = interval;
5991 	hdev->le_scan_window = window;
5992 
5993 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5994 				NULL, 0);
5995 
5996 	/* If background scan is running, restart it so new parameters are
5997 	 * loaded.
5998 	 */
5999 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6000 	    hdev->discovery.state == DISCOVERY_STOPPED)
6001 		hci_update_passive_scan(hdev);
6002 
6003 	hci_dev_unlock(hdev);
6004 
6005 	return err;
6006 }
6007 
6008 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6009 {
6010 	struct mgmt_pending_cmd *cmd = data;
6011 
6012 	bt_dev_dbg(hdev, "err %d", err);
6013 
6014 	if (err) {
6015 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6016 				mgmt_status(err));
6017 	} else {
6018 		struct mgmt_mode *cp = cmd->param;
6019 
6020 		if (cp->val)
6021 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6022 		else
6023 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6024 
6025 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6026 		new_settings(hdev, cmd->sk);
6027 	}
6028 
6029 	mgmt_pending_free(cmd);
6030 }
6031 
/* hci_cmd_sync_queue() work callback: write the requested fast
 * connectable setting to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6039 
/* MGMT_OP_SET_FAST_CONNECTABLE handler. Requires BR/EDR and at least
 * Bluetooth 1.2 (page scan parameter support). While powered off only
 * the flag is toggled; otherwise the change is queued to the controller
 * and completed in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the setting already matches */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: just toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6095 
/* Completion callback for Set BR/EDR. set_bredr() sets the
 * HCI_BREDR_ENABLED flag before queueing the work, so on failure the
 * flag has to be rolled back here.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6118 
6119 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6120 {
6121 	int status;
6122 
6123 	status = hci_write_fast_connectable_sync(hdev, false);
6124 
6125 	if (!status)
6126 		status = hci_update_scan_sync(hdev);
6127 
6128 	/* Since only the advertising data flags will change, there
6129 	 * is no need to update the scan response data.
6130 	 */
6131 	if (!status)
6132 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6133 
6134 	return status;
6135 }
6136 
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR support on a
 * dual-mode controller. Disabling is only permitted while powered off;
 * re-enabling is rejected when a static address or secure connections
 * would make the resulting configuration invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; a controller without any enabled
	 * transport is not allowed.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the setting already matches */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6237 
/* Completion callback for Set Secure Connections: synchronize the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags with the requested value
 * (0x00 off, 0x01 enabled, 0x02 SC-only mode) and notify userspace.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6275 
/* hci_cmd_sync_queue() work callback for Set Secure Connections: write
 * the SC support setting to the controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6287 
/* MGMT_OP_SET_SECURE_CONN handler. val: 0x00 off, 0x01 enabled,
 * 0x02 SC-only mode. When no HCI traffic is needed (powered off, no SC
 * controller support, or BR/EDR disabled) the flags are toggled
 * directly; otherwise the change is queued and finished in
 * set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled, SC requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Toggle the flags directly when no controller write is needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6368 
/* MGMT_OP_SET_DEBUG_KEYS handler. val: 0x00 discard debug keys,
 * 0x01 keep them, 0x02 keep them and also use SSP debug mode for new
 * pairings.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean debug keys are retained */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 additionally enables use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the SSP debug mode change to a powered, SSP-enabled
	 * controller when the use-flag actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6415 
/* Set Privacy (MGMT_OP_SET_PRIVACY) command handler.
 *
 * cp->privacy: 0x00 = disabled, 0x01 = privacy enabled,
 * 0x02 = limited privacy. Also stores the local Identity Resolving
 * Key supplied in cp->irk. Rejected while the controller is powered.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA as expired so a fresh one gets generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	/* Only emit New Settings if the privacy setting actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6472 
6473 static bool irk_is_valid(struct mgmt_irk_info *irk)
6474 {
6475 	switch (irk->addr.type) {
6476 	case BDADDR_LE_PUBLIC:
6477 		return true;
6478 
6479 	case BDADDR_LE_RANDOM:
6480 		/* Two most significant bits shall be set */
6481 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6482 			return false;
6483 		return true;
6484 	}
6485 
6486 	return false;
6487 }
6488 
/* Load IRKs (MGMT_OP_LOAD_IRKS) command handler.
 *
 * Replaces the entire set of stored Identity Resolving Keys with the
 * list supplied by user space. Entries matching a blocked key are
 * skipped with a warning instead of failing the whole command.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on irk_count such that the full command still fits
	 * in a 16-bit length.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the stored key list so
	 * the operation is all-or-nothing with respect to bad input.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not fatal */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handing us IRKs implies it can handle RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6559 
6560 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6561 {
6562 	if (key->initiator != 0x00 && key->initiator != 0x01)
6563 		return false;
6564 
6565 	switch (key->addr.type) {
6566 	case BDADDR_LE_PUBLIC:
6567 		return true;
6568 
6569 	case BDADDR_LE_RANDOM:
6570 		/* Two most significant bits shall be set */
6571 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6572 			return false;
6573 		return true;
6574 	}
6575 
6576 	return false;
6577 }
6578 
/* Load Long Term Keys (MGMT_OP_LOAD_LONG_TERM_KEYS) command handler.
 *
 * Replaces the entire set of stored SMP LTKs with the list supplied
 * by user space. Blocked keys, unknown key types and P-256 debug keys
 * are skipped rather than failing the whole command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on key_count such that the full command still fits
	 * in a 16-bit length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before clearing the stored key list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not fatal */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP key type and the
		 * authenticated (MITM protection) attribute.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE: the fallthrough into default means P-256
			 * debug keys are never stored; the assignments
			 * above are effectively dead.
			 */
			fallthrough;
		default:
			/* Unknown key types are silently skipped */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6674 
/* Completion callback for Get Connection Information: build the reply
 * from the values cached on the hci_conn (or invalid markers on
 * failure), release the connection reference taken by get_conn_info()
 * and free the pending command.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the address from the request back in the reply */
	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report explicit invalid markers instead of stale data */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* conn may already have been released by get_conn_info_sync() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6708 
/* hci_cmd_sync work for Get Connection Information: re-validate the
 * connection and refresh RSSI/TX power values from the controller.
 * Returns 0 on success, a negative error, or MGMT_STATUS_NOT_CONNECTED
 * when the connection went away.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Release the reference here so the completion callback
		 * does not operate on a dead connection.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6751 
/* Get Connection Information (MGMT_OP_GET_CONN_INFO) command handler.
 *
 * Replies immediately from the values cached on the hci_conn when they
 * are recent enough; otherwise queues get_conn_info_sync() to refresh
 * them from the controller and completes asynchronously.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Hold the connection for the duration of the refresh;
		 * released in get_conn_info_sync()/get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6845 
/* Completion callback for Get Clock Information: report the local
 * (and, when a connection was involved, piconet) clock values,
 * release the connection reference and free the pending command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the address from the request back in the reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure the reply carries only the zeroed address info */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6878 
/* hci_cmd_sync work for Get Clock Information: always read the local
 * clock, and additionally the piconet clock when the request targeted
 * a still-valid connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* handle == 0 / which == 0: read the local clock first */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection went away: drop the reference here so
			 * the completion callback does not touch it.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6909 
/* Get Clock Information (MGMT_OP_GET_CLOCK_INFO) command handler.
 *
 * BR/EDR only. When a non-zero address is given, the piconet clock of
 * that connection is read in addition to the local clock. Completes
 * asynchronously via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects a piconet clock query and must
	 * refer to an existing connection.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Keep the connection alive until the work completes;
		 * released in get_clock_info_sync()/_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6977 
6978 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6979 {
6980 	struct hci_conn *conn;
6981 
6982 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6983 	if (!conn)
6984 		return false;
6985 
6986 	if (conn->dst_type != type)
6987 		return false;
6988 
6989 	if (conn->state != BT_CONNECTED)
6990 		return false;
6991 
6992 	return true;
6993 }
6994 
/* This function requires the caller holds hdev->lock.
 *
 * Create (or reuse) the connection parameters entry for the given
 * address and move it onto the pending-connection or reporting list
 * that matches the requested auto_connect policy. Returns 0 on
 * success and -EIO if the entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from the current action list before re-filing */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-flight explicit connect takes precedence over
		 * passive device reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect if we are not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7039 
7040 static void device_added(struct sock *sk, struct hci_dev *hdev,
7041 			 bdaddr_t *bdaddr, u8 type, u8 action)
7042 {
7043 	struct mgmt_ev_device_added ev;
7044 
7045 	bacpy(&ev.addr.bdaddr, bdaddr);
7046 	ev.addr.type = type;
7047 	ev.action = action;
7048 
7049 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7050 }
7051 
/* hci_cmd_sync work for Add Device: just refresh the passive scan
 * state so the newly added entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7056 
/* Add Device (MGMT_OP_ADD_DEVICE) command handler.
 *
 * action: 0x00 = background scan when disconnected, 0x01 = allow
 * incoming connection, 0x02 = auto-connect. For BR/EDR only action
 * 0x01 (accept list) is supported; for LE the address is added to the
 * connection parameters with the matching auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan settings may need updating for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up any flags already set on an existing entry so the
		 * Device Flags Changed event below reports them accurately.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			bitmap_to_arr32(&current_flags, params->flags,
					__HCI_CONN_NUM_FLAGS);
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7160 
7161 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7162 			   bdaddr_t *bdaddr, u8 type)
7163 {
7164 	struct mgmt_ev_device_removed ev;
7165 
7166 	bacpy(&ev.addr.bdaddr, bdaddr);
7167 	ev.addr.type = type;
7168 
7169 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7170 }
7171 
/* hci_cmd_sync work for Remove Device: just refresh the passive scan
 * state so the removed entries take effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7176 
/* Remove Device (MGMT_OP_REMOVE_DEVICE) command handler.
 *
 * With a specific address: removes a BR/EDR accept list entry or an
 * LE connection parameters entry. With BDADDR_ANY (and type 0):
 * removes all accept list entries and all non-disabled LE connection
 * parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan settings may need updating now that the
			 * entry is gone.
			 */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: wholesale removal path */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* With the wildcard address the type must be 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an in-flight explicit connect,
			 * demoting them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7304 
/* Load Connection Parameters (MGMT_OP_LOAD_CONN_PARAM) command handler.
 *
 * Clears all disabled connection parameter entries and then loads the
 * supplied LE connection parameters. Individual invalid entries are
 * logged and skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count such that the full command still
	 * fits in a 16-bit length.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received length */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7389 
7390 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7391 			       void *data, u16 len)
7392 {
7393 	struct mgmt_cp_set_external_config *cp = data;
7394 	bool changed;
7395 	int err;
7396 
7397 	bt_dev_dbg(hdev, "sock %p", sk);
7398 
7399 	if (hdev_is_powered(hdev))
7400 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7401 				       MGMT_STATUS_REJECTED);
7402 
7403 	if (cp->config != 0x00 && cp->config != 0x01)
7404 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7405 				         MGMT_STATUS_INVALID_PARAMS);
7406 
7407 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7408 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7409 				       MGMT_STATUS_NOT_SUPPORTED);
7410 
7411 	hci_dev_lock(hdev);
7412 
7413 	if (cp->config)
7414 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7415 	else
7416 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7417 
7418 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7419 	if (err < 0)
7420 		goto unlock;
7421 
7422 	if (!changed)
7423 		goto unlock;
7424 
7425 	err = new_options(hdev, sk);
7426 
7427 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7428 		mgmt_index_removed(hdev);
7429 
7430 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7431 			hci_dev_set_flag(hdev, HCI_CONFIG);
7432 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7433 
7434 			queue_work(hdev->req_workqueue, &hdev->power_on);
7435 		} else {
7436 			set_bit(HCI_RAW, &hdev->flags);
7437 			mgmt_index_added(hdev);
7438 		}
7439 	}
7440 
7441 unlock:
7442 	hci_dev_unlock(hdev);
7443 	return err;
7444 }
7445 
/* Set Public Address (MGMT_OP_SET_PUBLIC_ADDRESS) command handler.
 *
 * Stores the public address to be programmed into the controller via
 * the driver's set_bdaddr callback. Only valid while powered off, and
 * setting it may complete the controller's configuration and trigger
 * power-on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the address completed configuration, move the index to the
	 * configured list and schedule power-on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7497 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Translates the controller's Read Local OOB (Extended) Data response
 * carried in cmd->skb into a mgmt reply whose payload is a sequence of
 * EIR fields (class of device plus the P-192/P-256 hash and randomizer
 * values), then broadcasts a Local OOB Data Updated event to the other
 * sockets that subscribed to it.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* If the sync request itself succeeded, derive the status from the
	 * response skb: missing, an error pointer, or its HCI status octet.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status was already mapped through
		 * mgmt_status() above; this second mapping looks redundant —
		 * confirm it is a no-op for the values that can reach here.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the P-192 hash/randomizer pair exists */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-dev field + two 18-byte fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: extended response with both pairs */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode suppresses the P-192 pair */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7620 
7621 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7622 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7623 {
7624 	struct mgmt_pending_cmd *cmd;
7625 	int err;
7626 
7627 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7628 			       cp, sizeof(*cp));
7629 	if (!cmd)
7630 		return -ENOMEM;
7631 
7632 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7633 				 read_local_oob_ext_data_complete);
7634 
7635 	if (err < 0) {
7636 		mgmt_pending_remove(cmd);
7637 		return err;
7638 	}
7639 
7640 	return 0;
7641 }
7642 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * Builds the local out-of-band pairing payload as a sequence of EIR
 * fields. For BR/EDR with SSP enabled the hash/randomizer values have to
 * be fetched from the controller, so the reply is deferred through
 * read_local_ssp_oob_req(); all other cases are answered synchronously.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine the status and an upper bound for eir_len
	 * so the reply buffer can be sized before any data is generated.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: generate the actual EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB values must come from the controller; reply is
			 * deferred to the completion callback on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static (type 0x01) or public (type 0x00) address
		 * depending on configuration; byte 7 carries the type.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7803 
7804 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7805 {
7806 	u32 flags = 0;
7807 
7808 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7809 	flags |= MGMT_ADV_FLAG_DISCOV;
7810 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7811 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7812 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7813 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7814 	flags |= MGMT_ADV_PARAM_DURATION;
7815 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7816 	flags |= MGMT_ADV_PARAM_INTERVALS;
7817 	flags |= MGMT_ADV_PARAM_TX_POWER;
7818 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7819 
7820 	/* In extended adv TX_POWER returned from Set Adv Param
7821 	 * will be always valid.
7822 	 */
7823 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7824 	    ext_adv_capable(hdev))
7825 		flags |= MGMT_ADV_FLAG_TX_POWER;
7826 
7827 	if (ext_adv_capable(hdev)) {
7828 		flags |= MGMT_ADV_FLAG_SEC_1M;
7829 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7830 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7831 
7832 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7833 			flags |= MGMT_ADV_FLAG_SEC_2M;
7834 
7835 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7836 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7837 	}
7838 
7839 	return flags;
7840 }
7841 
/* MGMT_OP_READ_ADV_FEATURES handler: report the supported advertising
 * flags, data-length limits, the maximum number of instances and the
 * list of currently registered instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance identifier */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill the variable-length tail with the instance identifiers */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7890 
7891 static u8 calculate_name_len(struct hci_dev *hdev)
7892 {
7893 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7894 
7895 	return eir_append_local_name(hdev, buf, 0);
7896 }
7897 
7898 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7899 			   bool is_adv_data)
7900 {
7901 	u8 max_len = HCI_MAX_AD_LENGTH;
7902 
7903 	if (is_adv_data) {
7904 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7905 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7906 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7907 			max_len -= 3;
7908 
7909 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7910 			max_len -= 3;
7911 	} else {
7912 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7913 			max_len -= calculate_name_len(hdev);
7914 
7915 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7916 			max_len -= 4;
7917 	}
7918 
7919 	return max_len;
7920 }
7921 
7922 static bool flags_managed(u32 adv_flags)
7923 {
7924 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7925 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7926 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7927 }
7928 
7929 static bool tx_power_managed(u32 adv_flags)
7930 {
7931 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7932 }
7933 
7934 static bool name_managed(u32 adv_flags)
7935 {
7936 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7937 }
7938 
7939 static bool appearance_managed(u32 adv_flags)
7940 {
7941 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7942 }
7943 
7944 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7945 			      u8 len, bool is_adv_data)
7946 {
7947 	int i, cur_len;
7948 	u8 max_len;
7949 
7950 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7951 
7952 	if (len > max_len)
7953 		return false;
7954 
7955 	/* Make sure that the data is correctly formatted. */
7956 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7957 		cur_len = data[i];
7958 
7959 		if (!cur_len)
7960 			continue;
7961 
7962 		if (data[i + 1] == EIR_FLAGS &&
7963 		    (!is_adv_data || flags_managed(adv_flags)))
7964 			return false;
7965 
7966 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7967 			return false;
7968 
7969 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7970 			return false;
7971 
7972 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7973 			return false;
7974 
7975 		if (data[i + 1] == EIR_APPEARANCE &&
7976 		    appearance_managed(adv_flags))
7977 			return false;
7978 
7979 		/* If the current field length would exceed the total data
7980 		 * length, then it's invalid.
7981 		 */
7982 		if (i + cur_len >= len)
7983 			return false;
7984 	}
7985 
7986 	return true;
7987 }
7988 
7989 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7990 {
7991 	u32 supported_flags, phy_flags;
7992 
7993 	/* The current implementation only supports a subset of the specified
7994 	 * flags. Also need to check mutual exclusiveness of sec flags.
7995 	 */
7996 	supported_flags = get_supported_adv_flags(hdev);
7997 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7998 	if (adv_flags & ~supported_flags ||
7999 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8000 		return false;
8001 
8002 	return true;
8003 }
8004 
8005 static bool adv_busy(struct hci_dev *hdev)
8006 {
8007 	return pending_find(MGMT_OP_SET_LE, hdev);
8008 }
8009 
8010 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8011 			     int err)
8012 {
8013 	struct adv_info *adv, *n;
8014 
8015 	bt_dev_dbg(hdev, "err %d", err);
8016 
8017 	hci_dev_lock(hdev);
8018 
8019 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8020 		u8 instance;
8021 
8022 		if (!adv->pending)
8023 			continue;
8024 
8025 		if (!err) {
8026 			adv->pending = false;
8027 			continue;
8028 		}
8029 
8030 		instance = adv->instance;
8031 
8032 		if (hdev->cur_adv_instance == instance)
8033 			cancel_adv_timeout(hdev);
8034 
8035 		hci_remove_adv_instance(hdev, instance);
8036 		mgmt_advertising_removed(sk, hdev, instance);
8037 	}
8038 
8039 	hci_dev_unlock(hdev);
8040 }
8041 
8042 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8043 {
8044 	struct mgmt_pending_cmd *cmd = data;
8045 	struct mgmt_cp_add_advertising *cp = cmd->param;
8046 	struct mgmt_rp_add_advertising rp;
8047 
8048 	memset(&rp, 0, sizeof(rp));
8049 
8050 	rp.instance = cp->instance;
8051 
8052 	if (err)
8053 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8054 				mgmt_status(err));
8055 	else
8056 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8057 				  mgmt_status(err), &rp, sizeof(rp));
8058 
8059 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8060 
8061 	mgmt_pending_free(cmd);
8062 }
8063 
8064 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8065 {
8066 	struct mgmt_pending_cmd *cmd = data;
8067 	struct mgmt_cp_add_advertising *cp = cmd->param;
8068 
8069 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8070 }
8071 
/* MGMT_OP_ADD_ADVERTISING handler: validate the request, register (or
 * replace) the advertising instance and, when the controller state
 * allows it, queue the sync work that programs and enables it. The
 * final reply is then deferred to add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	/* NOTE(review): snapshot taken before hci_dev_lock() below —
	 * confirm no concurrent updater can race this read.
	 */
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance ids are 1-based and bounded by the controller's set count */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must match the declared data lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs the power-off timer, so require a powered device */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Both TLV payloads (adv data, then scan rsp) must be well formed */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Redirect the pending command at the instance actually scheduled */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8204 
8205 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8206 					int err)
8207 {
8208 	struct mgmt_pending_cmd *cmd = data;
8209 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8210 	struct mgmt_rp_add_ext_adv_params rp;
8211 	struct adv_info *adv;
8212 	u32 flags;
8213 
8214 	BT_DBG("%s", hdev->name);
8215 
8216 	hci_dev_lock(hdev);
8217 
8218 	adv = hci_find_adv_instance(hdev, cp->instance);
8219 	if (!adv)
8220 		goto unlock;
8221 
8222 	rp.instance = cp->instance;
8223 	rp.tx_power = adv->tx_power;
8224 
8225 	/* While we're at it, inform userspace of the available space for this
8226 	 * advertisement, given the flags that will be used.
8227 	 */
8228 	flags = __le32_to_cpu(cp->flags);
8229 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8230 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8231 
8232 	if (err) {
8233 		/* If this advertisement was previously advertising and we
8234 		 * failed to update it, we signal that it has been removed and
8235 		 * delete its structure
8236 		 */
8237 		if (!adv->pending)
8238 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8239 
8240 		hci_remove_adv_instance(hdev, cp->instance);
8241 
8242 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8243 				mgmt_status(err));
8244 	} else {
8245 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8246 				  mgmt_status(err), &rp, sizeof(rp));
8247 	}
8248 
8249 unlock:
8250 	if (cmd)
8251 		mgmt_pending_free(cmd);
8252 
8253 	hci_dev_unlock(hdev);
8254 }
8255 
8256 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8257 {
8258 	struct mgmt_pending_cmd *cmd = data;
8259 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8260 
8261 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8262 }
8263 
8264 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8265 			      void *data, u16 data_len)
8266 {
8267 	struct mgmt_cp_add_ext_adv_params *cp = data;
8268 	struct mgmt_rp_add_ext_adv_params rp;
8269 	struct mgmt_pending_cmd *cmd = NULL;
8270 	u32 flags, min_interval, max_interval;
8271 	u16 timeout, duration;
8272 	u8 status;
8273 	s8 tx_power;
8274 	int err;
8275 
8276 	BT_DBG("%s", hdev->name);
8277 
8278 	status = mgmt_le_support(hdev);
8279 	if (status)
8280 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8281 				       status);
8282 
8283 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8284 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8285 				       MGMT_STATUS_INVALID_PARAMS);
8286 
8287 	/* The purpose of breaking add_advertising into two separate MGMT calls
8288 	 * for params and data is to allow more parameters to be added to this
8289 	 * structure in the future. For this reason, we verify that we have the
8290 	 * bare minimum structure we know of when the interface was defined. Any
8291 	 * extra parameters we don't know about will be ignored in this request.
8292 	 */
8293 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8294 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8295 				       MGMT_STATUS_INVALID_PARAMS);
8296 
8297 	flags = __le32_to_cpu(cp->flags);
8298 
8299 	if (!requested_adv_flags_are_valid(hdev, flags))
8300 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8301 				       MGMT_STATUS_INVALID_PARAMS);
8302 
8303 	hci_dev_lock(hdev);
8304 
8305 	/* In new interface, we require that we are powered to register */
8306 	if (!hdev_is_powered(hdev)) {
8307 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8308 				      MGMT_STATUS_REJECTED);
8309 		goto unlock;
8310 	}
8311 
8312 	if (adv_busy(hdev)) {
8313 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8314 				      MGMT_STATUS_BUSY);
8315 		goto unlock;
8316 	}
8317 
8318 	/* Parse defined parameters from request, use defaults otherwise */
8319 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8320 		  __le16_to_cpu(cp->timeout) : 0;
8321 
8322 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8323 		   __le16_to_cpu(cp->duration) :
8324 		   hdev->def_multi_adv_rotation_duration;
8325 
8326 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8327 		       __le32_to_cpu(cp->min_interval) :
8328 		       hdev->le_adv_min_interval;
8329 
8330 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8331 		       __le32_to_cpu(cp->max_interval) :
8332 		       hdev->le_adv_max_interval;
8333 
8334 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8335 		   cp->tx_power :
8336 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8337 
8338 	/* Create advertising instance with no advertising or response data */
8339 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8340 				   0, NULL, 0, NULL, timeout, duration,
8341 				   tx_power, min_interval, max_interval);
8342 
8343 	if (err < 0) {
8344 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8345 				      MGMT_STATUS_FAILED);
8346 		goto unlock;
8347 	}
8348 
8349 	/* Submit request for advertising params if ext adv available */
8350 	if (ext_adv_capable(hdev)) {
8351 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8352 				       data, data_len);
8353 		if (!cmd) {
8354 			err = -ENOMEM;
8355 			hci_remove_adv_instance(hdev, cp->instance);
8356 			goto unlock;
8357 		}
8358 
8359 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8360 					 add_ext_adv_params_complete);
8361 		if (err < 0)
8362 			mgmt_pending_free(cmd);
8363 	} else {
8364 		rp.instance = cp->instance;
8365 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8366 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8367 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8368 		err = mgmt_cmd_complete(sk, hdev->id,
8369 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8370 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8371 	}
8372 
8373 unlock:
8374 	hci_dev_unlock(hdev);
8375 
8376 	return err;
8377 }
8378 
8379 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8380 {
8381 	struct mgmt_pending_cmd *cmd = data;
8382 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8383 	struct mgmt_rp_add_advertising rp;
8384 
8385 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8386 
8387 	memset(&rp, 0, sizeof(rp));
8388 
8389 	rp.instance = cp->instance;
8390 
8391 	if (err)
8392 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8393 				mgmt_status(err));
8394 	else
8395 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8396 				  mgmt_status(err), &rp, sizeof(rp));
8397 
8398 	mgmt_pending_free(cmd);
8399 }
8400 
8401 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8402 {
8403 	struct mgmt_pending_cmd *cmd = data;
8404 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8405 	int err;
8406 
8407 	if (ext_adv_capable(hdev)) {
8408 		err = hci_update_adv_data_sync(hdev, cp->instance);
8409 		if (err)
8410 			return err;
8411 
8412 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8413 		if (err)
8414 			return err;
8415 
8416 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8417 	}
8418 
8419 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8420 }
8421 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-stage
 * extended Add Advertising flow. Attaches data to the instance created
 * by MGMT_OP_ADD_EXT_ADV_PARAMS and schedules it; on any validation or
 * queueing failure the half-constructed instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by Add Ext Adv Params first */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* First time the data was set: announce the instance now */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8540 
8541 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8542 					int err)
8543 {
8544 	struct mgmt_pending_cmd *cmd = data;
8545 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8546 	struct mgmt_rp_remove_advertising rp;
8547 
8548 	bt_dev_dbg(hdev, "err %d", err);
8549 
8550 	memset(&rp, 0, sizeof(rp));
8551 	rp.instance = cp->instance;
8552 
8553 	if (err)
8554 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8555 				mgmt_status(err));
8556 	else
8557 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8558 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8559 
8560 	mgmt_pending_free(cmd);
8561 }
8562 
8563 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8564 {
8565 	struct mgmt_pending_cmd *cmd = data;
8566 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8567 	int err;
8568 
8569 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8570 	if (err)
8571 		return err;
8572 
8573 	if (list_empty(&hdev->adv_instances))
8574 		err = hci_disable_advertising_sync(hdev);
8575 
8576 	return err;
8577 }
8578 
8579 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8580 			      void *data, u16 data_len)
8581 {
8582 	struct mgmt_cp_remove_advertising *cp = data;
8583 	struct mgmt_pending_cmd *cmd;
8584 	int err;
8585 
8586 	bt_dev_dbg(hdev, "sock %p", sk);
8587 
8588 	hci_dev_lock(hdev);
8589 
8590 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8591 		err = mgmt_cmd_status(sk, hdev->id,
8592 				      MGMT_OP_REMOVE_ADVERTISING,
8593 				      MGMT_STATUS_INVALID_PARAMS);
8594 		goto unlock;
8595 	}
8596 
8597 	if (pending_find(MGMT_OP_SET_LE, hdev)) {
8598 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8599 				      MGMT_STATUS_BUSY);
8600 		goto unlock;
8601 	}
8602 
8603 	if (list_empty(&hdev->adv_instances)) {
8604 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8605 				      MGMT_STATUS_INVALID_PARAMS);
8606 		goto unlock;
8607 	}
8608 
8609 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8610 			       data_len);
8611 	if (!cmd) {
8612 		err = -ENOMEM;
8613 		goto unlock;
8614 	}
8615 
8616 	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
8617 				 remove_advertising_complete);
8618 	if (err < 0)
8619 		mgmt_pending_free(cmd);
8620 
8621 unlock:
8622 	hci_dev_unlock(hdev);
8623 
8624 	return err;
8625 }
8626 
8627 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8628 			     void *data, u16 data_len)
8629 {
8630 	struct mgmt_cp_get_adv_size_info *cp = data;
8631 	struct mgmt_rp_get_adv_size_info rp;
8632 	u32 flags, supported_flags;
8633 	int err;
8634 
8635 	bt_dev_dbg(hdev, "sock %p", sk);
8636 
8637 	if (!lmp_le_capable(hdev))
8638 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8639 				       MGMT_STATUS_REJECTED);
8640 
8641 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8642 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8643 				       MGMT_STATUS_INVALID_PARAMS);
8644 
8645 	flags = __le32_to_cpu(cp->flags);
8646 
8647 	/* The current implementation only supports a subset of the specified
8648 	 * flags.
8649 	 */
8650 	supported_flags = get_supported_adv_flags(hdev);
8651 	if (flags & ~supported_flags)
8652 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8653 				       MGMT_STATUS_INVALID_PARAMS);
8654 
8655 	rp.instance = cp->instance;
8656 	rp.flags = cp->flags;
8657 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8658 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8659 
8660 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8661 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8662 
8663 	return err;
8664 }
8665 
/* Dispatch table for mgmt commands, indexed directly by opcode — entries
 * must therefore stay in opcode order with no gaps. The size field is the
 * exact expected parameter length, or the minimum length for entries
 * flagged HCI_MGMT_VAR_LEN.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8793 
/* Announce a newly registered controller index. Primary controllers get
 * the legacy (UNCONF_)INDEX_ADDED event plus an EXT_INDEX_ADDED event;
 * AMP controllers only get EXT_INDEX_ADDED. Other device types are
 * ignored.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not managed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8825 
/* Announce removal of a controller index. For primary controllers all
 * still-pending mgmt commands are first completed with INVALID_INDEX,
 * then the legacy (UNCONF_)INDEX_REMOVED event is sent; every supported
 * type additionally gets an EXT_INDEX_REMOVED event.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not managed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8860 
8861 void mgmt_power_on(struct hci_dev *hdev, int err)
8862 {
8863 	struct cmd_lookup match = { NULL, hdev };
8864 
8865 	bt_dev_dbg(hdev, "err %d", err);
8866 
8867 	hci_dev_lock(hdev);
8868 
8869 	if (!err) {
8870 		restart_le_actions(hdev);
8871 		hci_update_passive_scan(hdev);
8872 	}
8873 
8874 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8875 
8876 	new_settings(hdev, match.sk);
8877 
8878 	if (match.sk)
8879 		sock_put(match.sk);
8880 
8881 	hci_dev_unlock(hdev);
8882 }
8883 
/* Finalize powering off: answer pending Set Powered commands, fail all
 * remaining pending commands, clear the published class of device if it
 * was non-zero, and emit the resulting settings change.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8917 
8918 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8919 {
8920 	struct mgmt_pending_cmd *cmd;
8921 	u8 status;
8922 
8923 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8924 	if (!cmd)
8925 		return;
8926 
8927 	if (err == -ERFKILL)
8928 		status = MGMT_STATUS_RFKILLED;
8929 	else
8930 		status = MGMT_STATUS_FAILED;
8931 
8932 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8933 
8934 	mgmt_pending_remove(cmd);
8935 }
8936 
8937 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8938 		       bool persistent)
8939 {
8940 	struct mgmt_ev_new_link_key ev;
8941 
8942 	memset(&ev, 0, sizeof(ev));
8943 
8944 	ev.store_hint = persistent;
8945 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8946 	ev.key.addr.type = BDADDR_BREDR;
8947 	ev.key.type = key->type;
8948 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8949 	ev.key.pin_len = key->pin_len;
8950 
8951 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8952 }
8953 
8954 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8955 {
8956 	switch (ltk->type) {
8957 	case SMP_LTK:
8958 	case SMP_LTK_RESPONDER:
8959 		if (ltk->authenticated)
8960 			return MGMT_LTK_AUTHENTICATED;
8961 		return MGMT_LTK_UNAUTHENTICATED;
8962 	case SMP_LTK_P256:
8963 		if (ltk->authenticated)
8964 			return MGMT_LTK_P256_AUTH;
8965 		return MGMT_LTK_P256_UNAUTH;
8966 	case SMP_LTK_P256_DEBUG:
8967 		return MGMT_LTK_P256_DEBUG;
8968 	}
8969 
8970 	return MGMT_LTK_UNAUTHENTICATED;
8971 }
8972 
/* Emit a New Long Term Key event. store_hint is forced to zero for
 * non-static random addresses since such keys cannot be re-used.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the key of the pairing initiator. */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9015 
9016 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9017 {
9018 	struct mgmt_ev_new_irk ev;
9019 
9020 	memset(&ev, 0, sizeof(ev));
9021 
9022 	ev.store_hint = persistent;
9023 
9024 	bacpy(&ev.rpa, &irk->rpa);
9025 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9026 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9027 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9028 
9029 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9030 }
9031 
9032 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9033 		   bool persistent)
9034 {
9035 	struct mgmt_ev_new_csrk ev;
9036 
9037 	memset(&ev, 0, sizeof(ev));
9038 
9039 	/* Devices using resolvable or non-resolvable random addresses
9040 	 * without providing an identity resolving key don't require
9041 	 * to store signature resolving keys. Their addresses will change
9042 	 * the next time around.
9043 	 *
9044 	 * Only when a remote device provides an identity address
9045 	 * make sure the signature resolving key is stored. So allow
9046 	 * static random and public addresses here.
9047 	 */
9048 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9049 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9050 		ev.store_hint = 0x00;
9051 	else
9052 		ev.store_hint = persistent;
9053 
9054 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9055 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9056 	ev.key.type = csrk->type;
9057 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9058 
9059 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9060 }
9061 
9062 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9063 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9064 			 u16 max_interval, u16 latency, u16 timeout)
9065 {
9066 	struct mgmt_ev_new_conn_param ev;
9067 
9068 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9069 		return;
9070 
9071 	memset(&ev, 0, sizeof(ev));
9072 	bacpy(&ev.addr.bdaddr, bdaddr);
9073 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9074 	ev.store_hint = store_hint;
9075 	ev.min_interval = cpu_to_le16(min_interval);
9076 	ev.max_interval = cpu_to_le16(max_interval);
9077 	ev.latency = cpu_to_le16(latency);
9078 	ev.timeout = cpu_to_le16(timeout);
9079 
9080 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9081 }
9082 
9083 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9084 			   u8 *name, u8 name_len)
9085 {
9086 	struct sk_buff *skb;
9087 	struct mgmt_ev_device_connected *ev;
9088 	u16 eir_len = 0;
9089 	u32 flags = 0;
9090 
9091 	if (conn->le_adv_data_len > 0)
9092 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9093 				     conn->le_adv_data_len);
9094 	else
9095 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9096 				     2 + name_len + 5);
9097 
9098 	ev = skb_put(skb, sizeof(*ev));
9099 	bacpy(&ev->addr.bdaddr, &conn->dst);
9100 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9101 
9102 	if (conn->out)
9103 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9104 
9105 	ev->flags = __cpu_to_le32(flags);
9106 
9107 	/* We must ensure that the EIR Data fields are ordered and
9108 	 * unique. Keep it simple for now and avoid the problem by not
9109 	 * adding any BR/EDR data to the LE adv.
9110 	 */
9111 	if (conn->le_adv_data_len > 0) {
9112 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9113 		eir_len = conn->le_adv_data_len;
9114 	} else {
9115 		if (name_len > 0) {
9116 			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9117 						  name, name_len);
9118 			skb_put(skb, eir_len);
9119 		}
9120 
9121 		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
9122 			eir_len = eir_append_data(ev->eir, eir_len,
9123 						  EIR_CLASS_OF_DEV,
9124 						  conn->dev_class, 3);
9125 			skb_put(skb, 5);
9126 		}
9127 	}
9128 
9129 	ev->eir_len = cpu_to_le16(eir_len);
9130 
9131 	mgmt_event_skb(skb, NULL);
9132 }
9133 
9134 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9135 {
9136 	struct sock **sk = data;
9137 
9138 	cmd->cmd_complete(cmd, 0);
9139 
9140 	*sk = cmd->sk;
9141 	sock_hold(*sk);
9142 
9143 	mgmt_pending_remove(cmd);
9144 }
9145 
/* Complete one pending Unpair Device command: announce the unpaired
 * device first, then finish and drop the pending command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9156 
9157 bool mgmt_powering_down(struct hci_dev *hdev)
9158 {
9159 	struct mgmt_pending_cmd *cmd;
9160 	struct mgmt_mode *cp;
9161 
9162 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9163 	if (!cmd)
9164 		return false;
9165 
9166 	cp = cmd->param;
9167 	if (!cp->val)
9168 		return true;
9169 
9170 	return false;
9171 }
9172 
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands. Also kicks the deferred power-off once the
 * last connection of a powering-down controller goes away.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections announced via Device Connected get the
	 * matching disconnect event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9212 
9213 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9214 			    u8 link_type, u8 addr_type, u8 status)
9215 {
9216 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9217 	struct mgmt_cp_disconnect *cp;
9218 	struct mgmt_pending_cmd *cmd;
9219 
9220 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9221 			     hdev);
9222 
9223 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9224 	if (!cmd)
9225 		return;
9226 
9227 	cp = cmd->param;
9228 
9229 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9230 		return;
9231 
9232 	if (cp->addr.type != bdaddr_type)
9233 		return;
9234 
9235 	cmd->cmd_complete(cmd, mgmt_status(status));
9236 	mgmt_pending_remove(cmd);
9237 }
9238 
9239 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9240 			 u8 addr_type, u8 status)
9241 {
9242 	struct mgmt_ev_connect_failed ev;
9243 
9244 	/* The connection is still in hci_conn_hash so test for 1
9245 	 * instead of 0 to know if this is the last one.
9246 	 */
9247 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9248 		cancel_delayed_work(&hdev->power_off);
9249 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9250 	}
9251 
9252 	bacpy(&ev.addr.bdaddr, bdaddr);
9253 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9254 	ev.status = mgmt_status(status);
9255 
9256 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9257 }
9258 
9259 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9260 {
9261 	struct mgmt_ev_pin_code_request ev;
9262 
9263 	bacpy(&ev.addr.bdaddr, bdaddr);
9264 	ev.addr.type = BDADDR_BREDR;
9265 	ev.secure = secure;
9266 
9267 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9268 }
9269 
9270 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9271 				  u8 status)
9272 {
9273 	struct mgmt_pending_cmd *cmd;
9274 
9275 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9276 	if (!cmd)
9277 		return;
9278 
9279 	cmd->cmd_complete(cmd, mgmt_status(status));
9280 	mgmt_pending_remove(cmd);
9281 }
9282 
9283 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9284 				      u8 status)
9285 {
9286 	struct mgmt_pending_cmd *cmd;
9287 
9288 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9289 	if (!cmd)
9290 		return;
9291 
9292 	cmd->cmd_complete(cmd, mgmt_status(status));
9293 	mgmt_pending_remove(cmd);
9294 }
9295 
9296 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9297 			      u8 link_type, u8 addr_type, u32 value,
9298 			      u8 confirm_hint)
9299 {
9300 	struct mgmt_ev_user_confirm_request ev;
9301 
9302 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9303 
9304 	bacpy(&ev.addr.bdaddr, bdaddr);
9305 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9306 	ev.confirm_hint = confirm_hint;
9307 	ev.value = cpu_to_le32(value);
9308 
9309 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9310 			  NULL);
9311 }
9312 
9313 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9314 			      u8 link_type, u8 addr_type)
9315 {
9316 	struct mgmt_ev_user_passkey_request ev;
9317 
9318 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9319 
9320 	bacpy(&ev.addr.bdaddr, bdaddr);
9321 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9322 
9323 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9324 			  NULL);
9325 }
9326 
9327 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9328 				      u8 link_type, u8 addr_type, u8 status,
9329 				      u8 opcode)
9330 {
9331 	struct mgmt_pending_cmd *cmd;
9332 
9333 	cmd = pending_find(opcode, hdev);
9334 	if (!cmd)
9335 		return -ENOENT;
9336 
9337 	cmd->cmd_complete(cmd, mgmt_status(status));
9338 	mgmt_pending_remove(cmd);
9339 
9340 	return 0;
9341 }
9342 
9343 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9344 				     u8 link_type, u8 addr_type, u8 status)
9345 {
9346 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9347 					  status, MGMT_OP_USER_CONFIRM_REPLY);
9348 }
9349 
9350 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9351 					 u8 link_type, u8 addr_type, u8 status)
9352 {
9353 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9354 					  status,
9355 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9356 }
9357 
9358 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9359 				     u8 link_type, u8 addr_type, u8 status)
9360 {
9361 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9362 					  status, MGMT_OP_USER_PASSKEY_REPLY);
9363 }
9364 
9365 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9366 					 u8 link_type, u8 addr_type, u8 status)
9367 {
9368 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9369 					  status,
9370 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
9371 }
9372 
9373 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9374 			     u8 link_type, u8 addr_type, u32 passkey,
9375 			     u8 entered)
9376 {
9377 	struct mgmt_ev_passkey_notify ev;
9378 
9379 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9380 
9381 	bacpy(&ev.addr.bdaddr, bdaddr);
9382 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9383 	ev.passkey = __cpu_to_le32(passkey);
9384 	ev.entered = entered;
9385 
9386 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9387 }
9388 
/* Report an authentication failure: emit the Auth Failed event (skipping
 * the pairing initiator's socket if one exists) and then complete any
 * pending pairing command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9409 
/* Completion of an HCI authentication-enable change: on error, fail all
 * pending Set Link Security commands; on success, sync the
 * HCI_LINK_SECURITY flag with the HCI_AUTH state, answer the pending
 * commands and broadcast new settings only if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test_and_* returns the previous flag value, so "changed" is
	 * true only when the flag flips.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9436 
9437 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9438 {
9439 	struct cmd_lookup *match = data;
9440 
9441 	if (match->sk == NULL) {
9442 		match->sk = cmd->sk;
9443 		sock_hold(match->sk);
9444 	}
9445 }
9446 
9447 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9448 				    u8 status)
9449 {
9450 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9451 
9452 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9453 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9454 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9455 
9456 	if (!status) {
9457 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9458 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9459 		ext_info_changed(hdev, NULL);
9460 	}
9461 
9462 	if (match.sk)
9463 		sock_put(match.sk);
9464 }
9465 
9466 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9467 {
9468 	struct mgmt_cp_set_local_name ev;
9469 	struct mgmt_pending_cmd *cmd;
9470 
9471 	if (status)
9472 		return;
9473 
9474 	memset(&ev, 0, sizeof(ev));
9475 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9476 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9477 
9478 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9479 	if (!cmd) {
9480 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9481 
9482 		/* If this is a HCI command related to powering on the
9483 		 * HCI dev don't send any mgmt signals.
9484 		 */
9485 		if (pending_find(MGMT_OP_SET_POWERED, hdev))
9486 			return;
9487 	}
9488 
9489 	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9490 			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9491 	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9492 }
9493 
/* Return true if the 128-bit @uuid appears in the @uuid_count-entry table
 * @uuids (raw byte comparison, no canonicalisation).
 */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 idx = 0;

	while (idx < uuid_count) {
		if (memcmp(uuids[idx], uuid, 16) == 0)
			return true;
		idx++;
	}

	return false;
}
9505 
/* Check whether any service UUID advertised in @eir matches one of the
 * @uuid_count 128-bit UUIDs in @uuids. @eir is EIR/AD formatted: a sequence
 * of fields, each a length octet followed by a type octet and data. 16-bit
 * and 32-bit UUID fields are expanded to 128 bits on top of the Bluetooth
 * base UUID before comparison. Returns true on the first match.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length octet terminates significant EIR data */
		if (field_len == 0)
			break;

		/* Stop if the field claims more octets than remain */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the field's AD type; UUID list entries start at
		 * offset 2 and are little-endian on the wire.
		 */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared byte-for-byte as stored */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length octet + field_len octets) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9560 
/* Schedule a restart of the ongoing LE scan so that devices suppressed by
 * the controller's strict duplicate filtering get reported again with
 * updated RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the scan window (scan_start + scan_duration)
	 * would already be over by the time the restart delay elapses.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9575 
/* Apply the Start Service Discovery filter parameters (the RSSI threshold
 * and UUID list stored in hdev->discovery) to a discovery result. Returns
 * true if the result should be reported to user space.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9620 
9621 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9622 				  bdaddr_t *bdaddr, u8 addr_type)
9623 {
9624 	struct mgmt_ev_adv_monitor_device_lost ev;
9625 
9626 	ev.monitor_handle = cpu_to_le16(handle);
9627 	bacpy(&ev.addr.bdaddr, bdaddr);
9628 	ev.addr.type = addr_type;
9629 
9630 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9631 		   NULL);
9632 }
9633 
/* Route a fully built MGMT_EV_DEVICE_FOUND event (@skb) to user space via
 * MGMT_EV_DEVICE_FOUND (when @report_device), MGMT_EV_ADV_MONITOR_DEVICE_FOUND
 * (when an Advertisement Monitor needs notifying about @bdaddr), or both.
 * Consumes @skb on every path, either by sending it or freeing it.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notify = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* The monitor event is the device-found payload plus the extra
	 * fixed-size fields (monitor_handle) of the larger event struct.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb) {
		/* Allocation failed: still honour the DEVICE_FOUND delivery
		 * (or drop the skb) before bailing out.
		 */
		if (report_device)
			mgmt_event_skb(skb, skip_sk);
		else
			kfree_skb(skb);
		return;
	}

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	skb_put_data(advmon_skb, skb->data, skb->len);

	hdev->advmon_pend_notify = false;

	/* Find a monitored entry for this address that has not yet been
	 * notified; also recompute whether any entry still awaits a
	 * notification so advmon_pend_notify stays accurate.
	 */
	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				*monitor_handle = cpu_to_le16(dev->handle);
				notify = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notify) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		*monitor_handle = 0;
		notify = true;
	}

	/* Deliver or free the original DEVICE_FOUND skb ... */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);

	/* ... and likewise the monitor copy */
	if (notify)
		mgmt_event_skb(advmon_skb, skip_sk);
	else
		kfree_skb(advmon_skb);
}
9723 
/* Report a discovered remote device to management user space, applying the
 * active discovery filters before building a MGMT_EV_DEVICE_FOUND event.
 *
 * @hdev:         controller that produced the result
 * @bdaddr:       remote device address
 * @link_type:    ACL_LINK (BR/EDR inquiry) or LE_LINK (advertising report)
 * @addr_type:    address type, mapped together with @link_type
 * @dev_class:    BR/EDR Class of Device, or NULL when not available
 * @rssi:         signal strength, HCI_RSSI_INVALID when unknown
 * @flags:        MGMT_DEV_FOUND_* flags for the event
 * @eir:          EIR or advertising data of length @eir_len
 * @scan_rsp:     scan response data of length @scan_rsp_len
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when one was not already
	 * present in the data (uses the 5 spare bytes reserved above).
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Final routing (DEVICE_FOUND and/or ADV_MONITOR_DEVICE_FOUND)
	 * consumes the skb.
	 */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9809 
9810 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9811 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9812 {
9813 	struct sk_buff *skb;
9814 	struct mgmt_ev_device_found *ev;
9815 	u16 eir_len;
9816 	u32 flags;
9817 
9818 	if (name_len)
9819 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len);
9820 	else
9821 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0);
9822 
9823 	ev = skb_put(skb, sizeof(*ev));
9824 	bacpy(&ev->addr.bdaddr, bdaddr);
9825 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9826 	ev->rssi = rssi;
9827 
9828 	if (name) {
9829 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9830 					  name_len);
9831 		flags = 0;
9832 		skb_put(skb, eir_len);
9833 	} else {
9834 		eir_len = 0;
9835 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9836 	}
9837 
9838 	ev->eir_len = cpu_to_le16(eir_len);
9839 	ev->flags = cpu_to_le32(flags);
9840 
9841 	mgmt_event_skb(skb, NULL);
9842 }
9843 
9844 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9845 {
9846 	struct mgmt_ev_discovering ev;
9847 
9848 	bt_dev_dbg(hdev, "discovering %u", discovering);
9849 
9850 	memset(&ev, 0, sizeof(ev));
9851 	ev.type = hdev->discovery.type;
9852 	ev.discovering = discovering;
9853 
9854 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9855 }
9856 
9857 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9858 {
9859 	struct mgmt_ev_controller_suspend ev;
9860 
9861 	ev.suspend_state = state;
9862 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9863 }
9864 
9865 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9866 		   u8 addr_type)
9867 {
9868 	struct mgmt_ev_controller_resume ev;
9869 
9870 	ev.wake_reason = reason;
9871 	if (bdaddr) {
9872 		bacpy(&ev.addr.bdaddr, bdaddr);
9873 		ev.addr.type = addr_type;
9874 	} else {
9875 		memset(&ev.addr, 0, sizeof(ev.addr));
9876 	}
9877 
9878 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9879 }
9880 
/* Management channel descriptor: binds the mgmt command handler table to
 * the HCI control channel; per-hdev state is set up via mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9887 
/* Register the management control channel. Returns 0 on success or a
 * negative error from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9892 
/* Unregister the management control channel registered by mgmt_init() */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9897