xref: /linux/net/bluetooth/mgmt.c (revision cfc4ca8986bb1f6182da6cd7bb57f228590b4643)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42 
/* Interface version/revision reported by MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	23
45 
/* Opcodes accepted from trusted (privileged) management sockets.
 * Reported verbatim by MGMT_OP_READ_COMMANDS; keep in sync with the
 * command table that dispatches these opcodes.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};
137 
/* Events delivered to trusted (privileged) management sockets.
 * Reported verbatim by MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only subset of opcodes allowed on untrusted sockets */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of events delivered to untrusted sockets (no key material,
 * no per-connection details)
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
/* Delay before flushing cached EIR/class updates (see service_cache_off) */
#define CACHE_TIMEOUT	secs_to_jiffies(2)

/* All-zero 128-bit value used to detect blank/unset keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (see the Bluetooth Core
 * Specification error code list); entries past the end of this table
 * fall back to MGMT_STATUS_FAILED in mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related event on the control channel to all sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event only to sockets matching @flag, optionally skipping
 * the originating socket @skip_sk.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event to all trusted control-channel sockets, optionally
 * skipping the originating socket @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Like mgmt_event() but takes an already-built event skb; the skb is
 * consumed by mgmt_send_event_skb().
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure (passed as void * so callers
 * can use it on arbitrary response buffers) with the compiled-in
 * interface version and revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* MGMT_OP_READ_VERSION handler: reply with the interface version.
 * Always succeeds; @data/@data_len are unused.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured controllers.
 *
 * Two passes are made under hci_dev_list_lock: the first sizes the
 * reply buffer, the second fills it. The second pass applies stricter
 * filters (SETUP/CONFIG/USER_CHANNEL/raw-only), so the final count can
 * only be smaller than the allocation — never an overflow.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length in case the second pass skipped devices */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: reply with the indexes of
 * all unconfigured controllers.
 *
 * Mirrors read_index_list() but with the HCI_UNCONFIGURED test
 * inverted; the same two-pass sizing/filling scheme applies, so the
 * second pass can only shrink the count, never overrun the buffer.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with every usable
 * controller, tagging each entry as configured (0x00) or
 * unconfigured (0x01) and including its bus type.
 *
 * Side effect: switches the calling socket over to extended index
 * events only (legacy index events are disabled from now on).
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Size for the worst case (every device listed); the fill pass
	 * below may skip devices and only shrink the count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
/* Return (little-endian) the set of MGMT_OPTION_* bits that still
 * need to be provided before the controller counts as configured;
 * mirrors the checks in is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
642 
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping the socket that triggered the change.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete a configuration command with the currently missing options
 * as the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with manufacturer ID plus
 * the supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
/* Build the MGMT_PHY_* bitmask of PHYs the controller can support,
 * derived from the LMP/LE feature bits. BR 1M 1-slot is implied by
 * BR/EDR support; EDR slot variants nest under the 2M/3M capability.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory whenever LE is supported */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738 
/* Build the MGMT_PHY_* bitmask of PHYs currently in use. For BR/EDR,
 * EDR packet types are selected when their bit is NOT set in pkt_type
 * (those bits mean "shall not be used"); for LE, the default TX/RX
 * PHY preferences are consulted directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR pkt_type bits are inverted: set = excluded */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	if (ll_privacy_capable(hdev))
855 		settings |= MGMT_SETTING_LL_PRIVACY;
856 
857 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
858 
859 	return settings;
860 }
861 
/* Build the MGMT_SETTING_* bitmask reflecting the controller's
 * current state (as opposed to what it could support, see
 * get_supported_settings()).
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}
944 
/* Look up a pending mgmt command by opcode on the control channel */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
949 
950 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
951 {
952 	struct mgmt_pending_cmd *cmd;
953 
954 	/* If there's a pending mgmt command the flags will not yet have
955 	 * their final values, so check for this first.
956 	 */
957 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
958 	if (cmd) {
959 		struct mgmt_mode *cp = cmd->param;
960 		if (cp->val == 0x01)
961 			return LE_AD_GENERAL;
962 		else if (cp->val == 0x02)
963 			return LE_AD_LIMITED;
964 	} else {
965 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
966 			return LE_AD_LIMITED;
967 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
968 			return LE_AD_GENERAL;
969 	}
970 
971 	return 0;
972 }
973 
974 bool mgmt_get_connectable(struct hci_dev *hdev)
975 {
976 	struct mgmt_pending_cmd *cmd;
977 
978 	/* If there's a pending mgmt command the flag will not yet have
979 	 * it's final value, so check for this first.
980 	 */
981 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
982 	if (cmd) {
983 		struct mgmt_mode *cp = cmd->param;
984 
985 		return cp->val;
986 	}
987 
988 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
989 }
990 
/* hci_cmd_sync callback: push the cached EIR and class-of-device
 * updates to the controller. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
998 
/* Delayed-work handler (service_cache, fired after CACHE_TIMEOUT):
 * flush cached EIR/class updates once the cache flag is still set.
 * test-and-clear makes concurrent invocations a no-op.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1009 
1010 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1011 {
1012 	/* The generation of a new RPA and programming it into the
1013 	 * controller happens in the hci_req_enable_advertising()
1014 	 * function.
1015 	 */
1016 	if (ext_adv_capable(hdev))
1017 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1018 	else
1019 		return hci_enable_advertising_sync(hdev);
1020 }
1021 
/* Delayed-work handler (rpa_expired): mark the resolvable private
 * address as expired and, if advertising is active, queue a refresh
 * so a new RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is currently enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1036 
1037 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1038 
/* Delayed work handler for hdev->discov_off: fires when the
 * discoverable timeout expires, clears the discoverable state and
 * pushes the change to the controller.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Apply the now non-discoverable state to the controller */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	/* Notify mgmt listeners that the settings changed */
	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1063 
1064 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1065 
/* Finish a mesh transmit: unless @silent, emit a Mesh Packet Complete
 * event for the packet's handle, then free the tracking entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle before mesh_tx is freed below */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1077 
/* hci_cmd_sync callback: stop mesh sending, disable advertising and
 * complete the next queued mesh packet, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1091 
1092 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1093 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync: kick off transmission of
 * the next queued mesh packet, or complete it with an event if queueing
 * fails.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	/* Reuse @err for the queueing result; the original value is no
	 * longer needed here.
	 */
	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1109 
/* Delayed work handler for hdev->mesh_send_done: if a mesh transmit is
 * still in flight, queue the sync work that finishes it.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1120 
/* One-time per-controller mgmt initialization, performed the first time
 * a mgmt socket talks to @hdev: sets up the delayed work items used by
 * this file and marks the device as mgmt-controlled.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already initialized for mgmt use */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1142 
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * version, manufacturer, settings, class of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1172 
/* Fill @eir with class of device, appearance and device names for the
 * extended info reply/event. Returns the number of bytes written.
 *
 * NOTE(review): no bound on @eir is taken here; callers are expected to
 * provide a buffer large enough for all appended fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device only applies to BR/EDR */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance only applies to LE */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1196 
/* MGMT_OP_READ_EXT_INFO handler: like Read Info but with the name,
 * class and appearance delivered as EIR data appended to the reply.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed reply header followed by variable-length EIR data */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1236 
/* Emit MGMT_EV_EXT_INFO_CHANGED (with fresh EIR data) to sockets that
 * opted in via Read Extended Controller Information, skipping @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Fixed event header followed by variable-length EIR data */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1252 
/* Send a Command Complete for @opcode carrying the current settings
 * bitmask as its payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1260 
1261 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1262 {
1263 	struct mgmt_ev_advertising_added ev;
1264 
1265 	ev.instance = instance;
1266 
1267 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1268 }
1269 
1270 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1271 			      u8 instance)
1272 {
1273 	struct mgmt_ev_advertising_removed ev;
1274 
1275 	ev.instance = instance;
1276 
1277 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1278 }
1279 
1280 static void cancel_adv_timeout(struct hci_dev *hdev)
1281 {
1282 	if (hdev->adv_instance_timeout) {
1283 		hdev->adv_instance_timeout = 0;
1284 		cancel_delayed_work(&hdev->adv_instance_expire);
1285 	}
1286 }
1287 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-queue every connection parameter entry onto the pending
	 * list that matches its auto-connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1312 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets that receive setting events, skipping @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1320 
/* Completion callback for set_powered_sync: on success responds with
 * the new settings (and restarts LE actions on power on); on failure
 * reports the error status to the command's socket.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE auto-connections and
			 * resume passive scanning.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1357 
/* hci_cmd_sync callback: apply the requested powered state from the
 * pending Set Powered command.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1373 
/* MGMT_OP_SET_POWERED handler: validates the request, rejects it while
 * a power transition is already in progress, and otherwise queues the
 * powered-state change as sync work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject power off while a power-down is already underway */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	/* Only one Set Powered command may be pending at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1432 
/* Broadcast the current settings to all mgmt sockets (no skip). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1437 
/* Context passed to mgmt_pending_foreach() callbacks below. @sk
 * collects (with a held reference) the first socket that received a
 * response; @mgmt_status carries the status code to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1443 
/* mgmt_pending_foreach() callback: reply to @cmd with the current
 * settings, record the first responder's socket in the lookup context
 * and free the pending command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	/* Keep a reference to the first socket so the caller can use it
	 * to skip this socket when broadcasting new settings.
	 */
	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1459 
/* mgmt_pending_foreach() callback: reply to @cmd with the status code
 * pointed to by @data and remove the pending command.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1467 
/* mgmt_pending_foreach() callback: complete @cmd using its own
 * cmd_complete handler when one is set, otherwise fall back to a plain
 * status response.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	/* No per-command handler: reply with match->mgmt_status (the
	 * first field of struct cmd_lookup is not used by
	 * cmd_status_rsp, which reads only the leading u8 via @data
	 * when called directly with a status pointer; here @data is the
	 * lookup context whose layout starts with the socket pointer —
	 * NOTE(review): cmd_status_rsp dereferences @data as u8*, so
	 * this passes the first byte of match->sk; confirm intent
	 * against upstream history.
	 */
	cmd_status_rsp(cmd, data);
}
1486 
/* cmd_complete handler that echoes the command's own parameters back as
 * the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1492 
/* cmd_complete handler that replies with just the leading
 * mgmt_addr_info portion of the command's parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1498 
1499 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1500 {
1501 	if (!lmp_bredr_capable(hdev))
1502 		return MGMT_STATUS_NOT_SUPPORTED;
1503 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1504 		return MGMT_STATUS_REJECTED;
1505 	else
1506 		return MGMT_STATUS_SUCCESS;
1507 }
1508 
1509 static u8 mgmt_le_support(struct hci_dev *hdev)
1510 {
1511 	if (!lmp_le_capable(hdev))
1512 		return MGMT_STATUS_NOT_SUPPORTED;
1513 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1514 		return MGMT_STATUS_REJECTED;
1515 	else
1516 		return MGMT_STATUS_SUCCESS;
1517 }
1518 
/* Completion callback for set_discoverable_sync: on success arms the
 * discoverable timeout (if one was requested) and replies with the new
 * settings; on failure reports the error and clears the limited
 * discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout only once discoverable mode is active */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1553 
/* hci_cmd_sync callback: push the discoverable state (as recorded in
 * the hdev flags) down to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1560 
/* MGMT_OP_SET_DISCOVERABLE handler: validates the mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout combination, updates the
 * discoverable flags and queues the controller update as sync work.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense when connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1693 
/* Completion callback for set_connectable_sync: on success replies with
 * the new settings and broadcasts them; on failure reports the error
 * status.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1722 
/* Handle Set Connectable while the controller is powered off: only the
 * flags are toggled, no HCI traffic is generated. Turning connectable
 * off also clears discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1751 
/* hci_cmd_sync callback: push the connectable state (as recorded in the
 * hdev flags) down to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1758 
/* MGMT_OP_SET_CONNECTABLE handler: validates the request, updates the
 * connectable/discoverable flags and queues the controller update as
 * sync work. When powered off, only the flags are changed.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Connectable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode,
		 * so stop any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1818 
/* MGMT_OP_SET_BONDABLE handler: toggles the HCI_BONDABLE flag and, on a
 * change, refreshes discoverable state and broadcasts the new settings.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear reports whether the flag actually changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1856 
/* MGMT_OP_SET_LINK_SECURITY handler: enables or disables BR/EDR link
 * level security. When powered, the change is sent to the controller
 * via HCI Write Authentication Enable; when powered off, only the flag
 * is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only setting */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Response is sent when the command complete event arrives */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1925 
/* Completion callback for set_ssp_sync: reconciles the HCI_SSP_ENABLED
 * flag with the outcome, responds to all pending Set SSP commands and
 * broadcasts new settings if the flag changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Enable failed: roll back the flag and let listeners
		 * know the setting did not change after all.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1967 
/* hci_cmd_sync callback: write the requested SSP mode to the
 * controller. The HCI_SSP_ENABLED flag is set optimistically before the
 * write and rolled back if the write fails.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Undo the optimistic flag set on failure */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1985 
/* MGMT_OP_SET_SSP handler: enables or disables Secure Simple Pairing.
 * When powered, the change is queued as sync work; when powered off,
 * only the flag is toggled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SSP is a BR/EDR feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2060 
2061 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2062 {
2063 	bt_dev_dbg(hdev, "sock %p", sk);
2064 
2065 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2066 				       MGMT_STATUS_NOT_SUPPORTED);
2067 }
2068 
/* Completion callback for set_le_sync: on failure sends a status reply
 * to every pending Set LE command; on success replies with the new
 * settings and broadcasts them.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2089 
/* hci_cmd_sync callback: apply the requested LE enabled state. Disabling
 * LE tears down advertising first; enabling refreshes the default
 * advertising/scan response data once LE host support is written.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop all advertising before turning LE off */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2133 
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER.
 *
 * On failure, every pending SET_MESH_RECEIVER command is answered via
 * cmd_status_rsp (which presumably also cleans up the pending entries —
 * note this path does not free cmd directly).  On success, only this
 * command is removed and completed.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Grab sk before the cmd is freed; it is still needed for the
	 * command-complete reply below.
	 */
	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2149 
/* hci_sync work for MGMT_OP_SET_MESH_RECEIVER: toggle the HCI_MESH flag
 * and (re)load the AD type filter used for mesh receiving.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	/* Clear the old filter first; if the new one doesn't fit below,
	 * the filter stays empty.
	 */
	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes after the fixed-size header are the AD type list. */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2172 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request, then queue
 * set_mesh_sync on the hci_sync context with set_mesh_complete as the
 * completion callback.  Requires LE and the mesh experimental flag.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		/* cmd may be NULL when mgmt_pending_add failed above. */
		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2210 
/* Completion callback for mesh_send_sync.
 *
 * On error, clear the mesh-sending state and report a failed send
 * complete for this transmission.  On success, arm the mesh_send_done
 * delayed work to fire after the advertising duration (25 ms per
 * requested transmission count).
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2231 
/* hci_sync work that starts advertising one queued mesh packet.
 *
 * The transmission uses a dedicated instance number one past the
 * controller's advertising sets (le_num_of_adv_sets + 1).  Returns a
 * positive MGMT_STATUS_BUSY when every regular instance slot is in use,
 * otherwise a negative errno or 0.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	/* Duration covers cnt advertising events at the max interval. */
	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance == 0 here means nothing needs to be scheduled now. */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2285 
2286 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2287 {
2288 	struct mgmt_rp_mesh_read_features *rp = data;
2289 
2290 	if (rp->used_handles >= rp->max_handles)
2291 		return;
2292 
2293 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2294 }
2295 
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of mesh
 * TX handles (only non-zero while LE is enabled) and the handles this
 * socket currently has outstanding.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the used portion of the handle array. */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2322 
/* hci_sync work for MGMT_OP_MESH_SEND_CANCEL.
 *
 * A zero handle cancels every outstanding mesh transmission owned by
 * the requesting socket; a non-zero handle cancels only that entry, and
 * only if it belongs to the requester.  Always completes the command
 * with success and frees it.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only allow cancelling a TX the requester owns. */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2349 
/* MGMT_OP_MESH_SEND_CANCEL handler: validate capabilities and queue the
 * send_cancel work on the hci_sync context.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		/* cmd may be NULL when mgmt_pending_new failed above. */
		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2383 
/* MGMT_OP_MESH_SEND handler: queue one mesh advertising transmission.
 *
 * The variable-length payload after the fixed header is the advertising
 * data (at most 31 bytes).  A transmission is refused while all
 * MESH_HANDLES_MAX handles are in use for this socket.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding handles via send_count. */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a transmission is already in flight, only enqueue the entry;
	 * otherwise kick off the send immediately.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): when !sending and the queueing above failed,
		 * mesh_tx is left on the list — confirm this is intentional.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2444 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * When the adapter is powered off, or the requested value already
 * matches the host LE state, only the flags are updated and the reply
 * is sent directly; otherwise set_le_sync is queued on the hci_sync
 * context with set_le_complete as the completion callback.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No controller interaction needed: just track the flags and
	 * notify about any resulting settings change.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2533 
/* hci_sync work for MGMT_OP_HCI_CMD_SYNC: issue a raw HCI command on
 * behalf of userspace and reply with the resulting event payload.
 */
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	/* cp->timeout is in seconds; 0 selects the default HCI timeout. */
	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				secs_to_jiffies(cp->timeout) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}
2561 
/* MGMT_OP_HCI_CMD_SYNC handler: validate the minimum parameter size and
 * queue send_hci_cmd_sync on the hci_sync context.
 */
static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 len)
{
	struct mgmt_cp_hci_cmd_sync *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	if (len < sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				      MGMT_STATUS_FAILED);

		/* cmd may be NULL when mgmt_pending_new failed above. */
		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2591 
2592 /* This is a helper function to test for pending mgmt commands that can
2593  * cause CoD or EIR HCI commands. We can only allow one such pending
2594  * mgmt command at a time since otherwise we cannot easily track what
2595  * the current values are, will be, and based on that calculate if a new
2596  * HCI command needs to be sent and if yes with what value.
2597  */
2598 static bool pending_eir_or_class(struct hci_dev *hdev)
2599 {
2600 	struct mgmt_pending_cmd *cmd;
2601 
2602 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2603 		switch (cmd->opcode) {
2604 		case MGMT_OP_ADD_UUID:
2605 		case MGMT_OP_REMOVE_UUID:
2606 		case MGMT_OP_SET_DEV_CLASS:
2607 		case MGMT_OP_SET_POWERED:
2608 			return true;
2609 		}
2610 	}
2611 
2612 	return false;
2613 }
2614 
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; UUIDs derived from it can be shortened to
 * 16 or 32 bits (see get_uuid_size below).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2619 
2620 static u8 get_uuid_size(const u8 *uuid)
2621 {
2622 	u32 val;
2623 
2624 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2625 		return 128;
2626 
2627 	val = get_unaligned_le32(&uuid[12]);
2628 	if (val > 0xffff)
2629 		return 32;
2630 
2631 	return 16;
2632 }
2633 
2634 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2635 {
2636 	struct mgmt_pending_cmd *cmd = data;
2637 
2638 	bt_dev_dbg(hdev, "err %d", err);
2639 
2640 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2641 			  mgmt_status(err), hdev->dev_class, 3);
2642 
2643 	mgmt_pending_free(cmd);
2644 }
2645 
/* hci_sync work for MGMT_OP_ADD_UUID: refresh the Class of Device and
 * then the EIR data so both reflect the updated UUID list.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2656 
/* MGMT_OP_ADD_UUID handler: append a UUID to the device's service list
 * and schedule a Class of Device / EIR refresh.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	/* The UUID is added before the pending command is created; on the
	 * failure paths below it intentionally remains on the list.
	 */
	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2706 
2707 static bool enable_service_cache(struct hci_dev *hdev)
2708 {
2709 	if (!hdev_is_powered(hdev))
2710 		return false;
2711 
2712 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2713 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2714 				   CACHE_TIMEOUT);
2715 		return true;
2716 	}
2717 
2718 	return false;
2719 }
2720 
/* hci_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of Device
 * and then the EIR data so both reflect the updated UUID list.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2731 
/* MGMT_OP_REMOVE_UUID handler: delete one UUID (or all, for the all-zero
 * wildcard) from the service list and schedule a CoD/EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID is a wildcard: clear the whole list. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over, the controller update is
		 * deferred; reply immediately with the current class.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2802 
2803 static int set_class_sync(struct hci_dev *hdev, void *data)
2804 {
2805 	int err = 0;
2806 
2807 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2808 		cancel_delayed_work_sync(&hdev->service_cache);
2809 		err = hci_update_eir_sync(hdev);
2810 	}
2811 
2812 	if (err)
2813 		return err;
2814 
2815 	return hci_update_class_sync(hdev);
2816 }
2817 
/* MGMT_OP_SET_DEV_CLASS handler: record the major/minor device class
 * and, on a powered adapter, schedule the controller update.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major must be
	 * zero (they are not part of the usable class value).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just store the values; the controller gets
	 * updated on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2872 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the list supplied by userspace.
 *
 * The existing key store is cleared unconditionally; individual keys
 * are then validated and skipped (with a warning) rather than failing
 * the whole command.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap key_count so expected_len below cannot overflow u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys on the administratively blocked list. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys are a BR/EDR concept only. */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2965 
2966 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2967 			   u8 addr_type, struct sock *skip_sk)
2968 {
2969 	struct mgmt_ev_device_unpaired ev;
2970 
2971 	bacpy(&ev.addr.bdaddr, bdaddr);
2972 	ev.addr.type = addr_type;
2973 
2974 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2975 			  skip_sk);
2976 }
2977 
/* Completion callback for unpair_device_sync: on success broadcast the
 * Device Unpaired event (skipping the requester's socket), then finish
 * the pending command with the original error and free it.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2989 
2990 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2991 {
2992 	struct mgmt_pending_cmd *cmd = data;
2993 	struct mgmt_cp_unpair_device *cp = cmd->param;
2994 	struct hci_conn *conn;
2995 
2996 	if (cp->addr.type == BDADDR_BREDR)
2997 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2998 					       &cp->addr.bdaddr);
2999 	else
3000 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3001 					       le_addr_type(cp->addr.type));
3002 
3003 	if (!conn)
3004 		return 0;
3005 
3006 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3007 	 * will clean up the connection no matter the error.
3008 	 */
3009 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3010 
3011 	return 0;
3012 }
3013 
/* MGMT_OP_UNPAIR_DEVICE handler: remove the pairing data for a device
 * and optionally disconnect it.
 *
 * For BR/EDR the stored link key is removed; for LE any ongoing SMP
 * pairing is aborted and the LTK/IRK removed.  If cp->disconnect is set
 * and a connection exists, link termination is queued on the hci_sync
 * context and the reply is deferred to unpair_device_complete.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3142 
3143 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3144 {
3145 	struct mgmt_pending_cmd *cmd = data;
3146 
3147 	cmd->cmd_complete(cmd, mgmt_status(err));
3148 	mgmt_pending_free(cmd);
3149 }
3150 
3151 static int disconnect_sync(struct hci_dev *hdev, void *data)
3152 {
3153 	struct mgmt_pending_cmd *cmd = data;
3154 	struct mgmt_cp_disconnect *cp = cmd->param;
3155 	struct hci_conn *conn;
3156 
3157 	if (cp->addr.type == BDADDR_BREDR)
3158 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3159 					       &cp->addr.bdaddr);
3160 	else
3161 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3162 					       le_addr_type(cp->addr.type));
3163 
3164 	if (!conn)
3165 		return -ENOTCONN;
3166 
3167 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3168 	 * will clean up the connection no matter the error.
3169 	 */
3170 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3171 
3172 	return 0;
3173 }
3174 
/* MGMT_OP_DISCONNECT handler: validate the address type and queue
 * disconnect_sync on the hci_sync context; the reply is sent from
 * disconnect_complete.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3220 
3221 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3222 {
3223 	switch (link_type) {
3224 	case CIS_LINK:
3225 	case BIS_LINK:
3226 	case LE_LINK:
3227 		switch (addr_type) {
3228 		case ADDR_LE_DEV_PUBLIC:
3229 			return BDADDR_LE_PUBLIC;
3230 
3231 		default:
3232 			/* Fallback to LE Random address type */
3233 			return BDADDR_LE_RANDOM;
3234 		}
3235 
3236 	default:
3237 		/* Fallback to BR/EDR type */
3238 		return BDADDR_BREDR;
3239 	}
3240 }
3241 
3242 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3243 			   u16 data_len)
3244 {
3245 	struct mgmt_rp_get_connections *rp;
3246 	struct hci_conn *c;
3247 	int err;
3248 	u16 i;
3249 
3250 	bt_dev_dbg(hdev, "sock %p", sk);
3251 
3252 	hci_dev_lock(hdev);
3253 
3254 	if (!hdev_is_powered(hdev)) {
3255 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3256 				      MGMT_STATUS_NOT_POWERED);
3257 		goto unlock;
3258 	}
3259 
3260 	i = 0;
3261 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3262 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3263 			i++;
3264 	}
3265 
3266 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3267 	if (!rp) {
3268 		err = -ENOMEM;
3269 		goto unlock;
3270 	}
3271 
3272 	i = 0;
3273 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3274 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3275 			continue;
3276 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3277 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3278 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3279 			continue;
3280 		i++;
3281 	}
3282 
3283 	rp->conn_count = cpu_to_le16(i);
3284 
3285 	/* Recalculate length in case of filtered SCO connections, etc */
3286 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3287 				struct_size(rp, addr, i));
3288 
3289 	kfree(rp);
3290 
3291 unlock:
3292 	hci_dev_unlock(hdev);
3293 	return err;
3294 }
3295 
3296 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3297 				   struct mgmt_cp_pin_code_neg_reply *cp)
3298 {
3299 	struct mgmt_pending_cmd *cmd;
3300 	int err;
3301 
3302 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3303 			       sizeof(*cp));
3304 	if (!cmd)
3305 		return -ENOMEM;
3306 
3307 	cmd->cmd_complete = addr_cmd_complete;
3308 
3309 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3310 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3311 	if (err < 0)
3312 		mgmt_pending_remove(cmd);
3313 
3314 	return err;
3315 }
3316 
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing is BR/EDR only, so look up an ACL link. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * answered with a negative reply to the controller and an
	 * Invalid Params status to the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Build the HCI reply from the mgmt parameters and send it. */
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3378 
3379 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3380 			     u16 len)
3381 {
3382 	struct mgmt_cp_set_io_capability *cp = data;
3383 
3384 	bt_dev_dbg(hdev, "sock %p", sk);
3385 
3386 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3387 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3388 				       MGMT_STATUS_INVALID_PARAMS);
3389 
3390 	hci_dev_lock(hdev);
3391 
3392 	hdev->io_capability = cp->io_capability;
3393 
3394 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3395 
3396 	hci_dev_unlock(hdev);
3397 
3398 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3399 				 NULL, 0);
3400 }
3401 
3402 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3403 {
3404 	struct hci_dev *hdev = conn->hdev;
3405 	struct mgmt_pending_cmd *cmd;
3406 
3407 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3408 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3409 			continue;
3410 
3411 		if (cmd->user_data != conn)
3412 			continue;
3413 
3414 		return cmd;
3415 	}
3416 
3417 	return NULL;
3418 }
3419 
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* Report the peer address back with the final pairing status. */
	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Releases the reference taken via hci_conn_get() when the
	 * pairing was set up in pair_device().
	 */
	hci_conn_put(conn);

	return err;
}
3448 
3449 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3450 {
3451 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3452 	struct mgmt_pending_cmd *cmd;
3453 
3454 	cmd = find_pairing(conn);
3455 	if (cmd) {
3456 		cmd->cmd_complete(cmd, status);
3457 		mgmt_pending_remove(cmd);
3458 	}
3459 }
3460 
3461 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3462 {
3463 	struct mgmt_pending_cmd *cmd;
3464 
3465 	BT_DBG("status %u", status);
3466 
3467 	cmd = find_pairing(conn);
3468 	if (!cmd) {
3469 		BT_DBG("Unable to find a pending command");
3470 		return;
3471 	}
3472 
3473 	cmd->cmd_complete(cmd, mgmt_status(status));
3474 	mgmt_pending_remove(cmd);
3475 }
3476 
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* Only failures are handled here; a successful LE pairing is
	 * completed via mgmt_smp_complete() once SMP has finished.
	 */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3495 
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply with the requested address so every status
	 * path can echo it back to userspace.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	/* Establish the connection on the transport matching the given
	 * address type.
	 */
	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	/* Map the connect error onto the closest mgmt status. */
	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another pairing is already
	 * using this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() via hci_conn_put(). */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, finish at once. */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3631 
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an in-flight Pair Device command to cancel. */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address given must match the pairing being cancelled. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pending Pair Device command as cancelled before
	 * answering the cancel request itself.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3688 
/* Common handler for the user pairing response commands (PIN code,
 * user confirmation and passkey replies, positive and negative).
 * LE responses are routed through SMP; BR/EDR responses are forwarded
 * as the given HCI command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection on the transport matching the address
	 * type.
	 */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP and answered
	 * immediately; no HCI command or pending entry is needed.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3759 
3760 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3761 			      void *data, u16 len)
3762 {
3763 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3764 
3765 	bt_dev_dbg(hdev, "sock %p", sk);
3766 
3767 	return user_pairing_resp(sk, hdev, &cp->addr,
3768 				MGMT_OP_PIN_CODE_NEG_REPLY,
3769 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3770 }
3771 
3772 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3773 			      u16 len)
3774 {
3775 	struct mgmt_cp_user_confirm_reply *cp = data;
3776 
3777 	bt_dev_dbg(hdev, "sock %p", sk);
3778 
3779 	if (len != sizeof(*cp))
3780 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3781 				       MGMT_STATUS_INVALID_PARAMS);
3782 
3783 	return user_pairing_resp(sk, hdev, &cp->addr,
3784 				 MGMT_OP_USER_CONFIRM_REPLY,
3785 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3786 }
3787 
3788 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3789 				  void *data, u16 len)
3790 {
3791 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3792 
3793 	bt_dev_dbg(hdev, "sock %p", sk);
3794 
3795 	return user_pairing_resp(sk, hdev, &cp->addr,
3796 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3797 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3798 }
3799 
3800 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3801 			      u16 len)
3802 {
3803 	struct mgmt_cp_user_passkey_reply *cp = data;
3804 
3805 	bt_dev_dbg(hdev, "sock %p", sk);
3806 
3807 	return user_pairing_resp(sk, hdev, &cp->addr,
3808 				 MGMT_OP_USER_PASSKEY_REPLY,
3809 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3810 }
3811 
3812 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3813 				  void *data, u16 len)
3814 {
3815 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3816 
3817 	bt_dev_dbg(hdev, "sock %p", sk);
3818 
3819 	return user_pairing_resp(sk, hdev, &cp->addr,
3820 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3821 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3822 }
3823 
3824 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3825 {
3826 	struct adv_info *adv_instance;
3827 
3828 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3829 	if (!adv_instance)
3830 		return 0;
3831 
3832 	/* stop if current instance doesn't need to be changed */
3833 	if (!(adv_instance->flags & flags))
3834 		return 0;
3835 
3836 	cancel_adv_timeout(hdev);
3837 
3838 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3839 	if (!adv_instance)
3840 		return 0;
3841 
3842 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3843 
3844 	return 0;
3845 }
3846 
3847 static int name_changed_sync(struct hci_dev *hdev, void *data)
3848 {
3849 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3850 }
3851 
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the request was cancelled or if this command is no
	 * longer the tracked pending one (already handled elsewhere).
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* If advertising is active, rotate instances so the new
		 * name gets picked up.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3877 
3878 static int set_name_sync(struct hci_dev *hdev, void *data)
3879 {
3880 	if (lmp_bredr_capable(hdev)) {
3881 		hci_update_name_sync(hdev);
3882 		hci_update_eir_sync(hdev);
3883 	}
3884 
3885 	/* The name is stored in the scan response data and so
3886 	 * no need to update the advertising data here.
3887 	 */
3888 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3889 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3890 
3891 	return 0;
3892 }
3893 
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored right
	 * away without talking to the controller.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off, just store the name and notify listeners;
	 * the controller will be updated on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	/* Powered on: queue the controller update; the reply is sent
	 * from set_name_complete().
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3956 
3957 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3958 {
3959 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3960 }
3961 
3962 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3963 			  u16 len)
3964 {
3965 	struct mgmt_cp_set_appearance *cp = data;
3966 	u16 appearance;
3967 	int err;
3968 
3969 	bt_dev_dbg(hdev, "sock %p", sk);
3970 
3971 	if (!lmp_le_capable(hdev))
3972 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3973 				       MGMT_STATUS_NOT_SUPPORTED);
3974 
3975 	appearance = le16_to_cpu(cp->appearance);
3976 
3977 	hci_dev_lock(hdev);
3978 
3979 	if (hdev->appearance != appearance) {
3980 		hdev->appearance = appearance;
3981 
3982 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3983 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3984 					   NULL);
3985 
3986 		ext_info_changed(hdev, sk);
3987 	}
3988 
3989 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3990 				0);
3991 
3992 	hci_dev_unlock(hdev);
3993 
3994 	return err;
3995 }
3996 
3997 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3998 				 void *data, u16 len)
3999 {
4000 	struct mgmt_rp_get_phy_configuration rp;
4001 
4002 	bt_dev_dbg(hdev, "sock %p", sk);
4003 
4004 	hci_dev_lock(hdev);
4005 
4006 	memset(&rp, 0, sizeof(rp));
4007 
4008 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4009 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4010 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4011 
4012 	hci_dev_unlock(hdev);
4013 
4014 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4015 				 &rp, sizeof(rp));
4016 }
4017 
4018 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4019 {
4020 	struct mgmt_ev_phy_configuration_changed ev;
4021 
4022 	memset(&ev, 0, sizeof(ev));
4023 
4024 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4025 
4026 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4027 			  sizeof(ev), skip);
4028 }
4029 
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if cancelled or if this is no longer the tracked
	 * pending command.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* Even when the queueing succeeded, the HCI command itself may
	 * have failed; derive the status from the response skb.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4067 
4068 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4069 {
4070 	struct mgmt_pending_cmd *cmd = data;
4071 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4072 	struct hci_cp_le_set_default_phy cp_phy;
4073 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4074 
4075 	memset(&cp_phy, 0, sizeof(cp_phy));
4076 
4077 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4078 		cp_phy.all_phys |= 0x01;
4079 
4080 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4081 		cp_phy.all_phys |= 0x02;
4082 
4083 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4084 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4085 
4086 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4087 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4088 
4089 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4090 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4091 
4092 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4093 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4094 
4095 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4096 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4097 
4098 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4099 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4100 
4101 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4102 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4103 
4104 	return 0;
4105 }
4106 
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside what the controller supports. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must always remain selected. */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is unchanged. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into the ACL packet type
	 * mask.  Note the inverted polarity of the EDR bits: a set
	 * HCI_2DHx/HCI_3DHx bit is cleared when the PHY is selected and
	 * set when it is not.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed, no HCI command is needed;
	 * notify listeners (if anything changed) and complete.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	/* The LE part requires an HCI command; the reply is sent from
	 * set_default_phy_complete().
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4235 
4236 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4237 			    u16 len)
4238 {
4239 	int err = MGMT_STATUS_SUCCESS;
4240 	struct mgmt_cp_set_blocked_keys *keys = data;
4241 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4242 				   sizeof(struct mgmt_blocked_key_info));
4243 	u16 key_count, expected_len;
4244 	int i;
4245 
4246 	bt_dev_dbg(hdev, "sock %p", sk);
4247 
4248 	key_count = __le16_to_cpu(keys->key_count);
4249 	if (key_count > max_key_count) {
4250 		bt_dev_err(hdev, "too big key_count value %u", key_count);
4251 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4252 				       MGMT_STATUS_INVALID_PARAMS);
4253 	}
4254 
4255 	expected_len = struct_size(keys, keys, key_count);
4256 	if (expected_len != len) {
4257 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4258 			   expected_len, len);
4259 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4260 				       MGMT_STATUS_INVALID_PARAMS);
4261 	}
4262 
4263 	hci_dev_lock(hdev);
4264 
4265 	hci_blocked_keys_clear(hdev);
4266 
4267 	for (i = 0; i < key_count; ++i) {
4268 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4269 
4270 		if (!b) {
4271 			err = MGMT_STATUS_NO_RESOURCES;
4272 			break;
4273 		}
4274 
4275 		b->type = keys->keys[i].type;
4276 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4277 		list_add_rcu(&b->list, &hdev->blocked_keys);
4278 	}
4279 	hci_dev_unlock(hdev);
4280 
4281 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4282 				err, NULL, 0);
4283 }
4284 
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only controllers that declare the quirk support this setting. */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered, the flag may not be flipped to a different
	 * value; only a no-op request is accepted.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Test-and-set/clear so 'changed' reflects an actual transition. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings when something actually changed. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4333 
/* MGMT Read Controller Capabilities command handler.
 *
 * Builds an EIR-style TLV list of security capabilities into a fixed
 * 20-byte reply buffer: security flags, optional max encryption key
 * size, SMP max key size, and optional LE TX power range. The buffer is
 * sized for the worst case of all entries present (2-byte cap_len header
 * plus 4 + 4 + 4 + 4 TLV bytes).
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Single-byte copies pack the two s8 power values into the
		 * 2-byte TLV payload.
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4400 
/* Experimental feature UUIDs. Each array stores the UUID named in the
 * comment above it with its bytes in reversed (little-endian) order,
 * matching the on-the-wire layout compared against MGMT command UUIDs.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4438 
4439 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4440 				  void *data, u16 data_len)
4441 {
4442 	struct mgmt_rp_read_exp_features_info *rp;
4443 	size_t len;
4444 	u16 idx = 0;
4445 	u32 flags;
4446 	int status;
4447 
4448 	bt_dev_dbg(hdev, "sock %p", sk);
4449 
4450 	/* Enough space for 7 features */
4451 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4452 	rp = kzalloc(len, GFP_KERNEL);
4453 	if (!rp)
4454 		return -ENOMEM;
4455 
4456 #ifdef CONFIG_BT_FEATURE_DEBUG
4457 	if (!hdev) {
4458 		flags = bt_dbg_get() ? BIT(0) : 0;
4459 
4460 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4461 		rp->features[idx].flags = cpu_to_le32(flags);
4462 		idx++;
4463 	}
4464 #endif
4465 
4466 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4467 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4468 			flags = BIT(0);
4469 		else
4470 			flags = 0;
4471 
4472 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4473 		rp->features[idx].flags = cpu_to_le32(flags);
4474 		idx++;
4475 	}
4476 
4477 	if (hdev && (aosp_has_quality_report(hdev) ||
4478 		     hdev->set_quality_report)) {
4479 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4480 			flags = BIT(0);
4481 		else
4482 			flags = 0;
4483 
4484 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4485 		rp->features[idx].flags = cpu_to_le32(flags);
4486 		idx++;
4487 	}
4488 
4489 	if (hdev && hdev->get_data_path_id) {
4490 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4491 			flags = BIT(0);
4492 		else
4493 			flags = 0;
4494 
4495 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4496 		rp->features[idx].flags = cpu_to_le32(flags);
4497 		idx++;
4498 	}
4499 
4500 	if (IS_ENABLED(CONFIG_BT_LE)) {
4501 		flags = iso_enabled() ? BIT(0) : 0;
4502 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4503 		rp->features[idx].flags = cpu_to_le32(flags);
4504 		idx++;
4505 	}
4506 
4507 	if (hdev && lmp_le_capable(hdev)) {
4508 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4509 			flags = BIT(0);
4510 		else
4511 			flags = 0;
4512 
4513 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4514 		rp->features[idx].flags = cpu_to_le32(flags);
4515 		idx++;
4516 	}
4517 
4518 	rp->feature_count = cpu_to_le16(idx);
4519 
4520 	/* After reading the experimental features information, enable
4521 	 * the events to update client on any future change.
4522 	 */
4523 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4524 
4525 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4526 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4527 				   0, rp, sizeof(*rp) + (20 * idx));
4528 
4529 	kfree(rp);
4530 	return status;
4531 }
4532 
4533 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4534 			       bool enabled, struct sock *skip)
4535 {
4536 	struct mgmt_ev_exp_feature_changed ev;
4537 
4538 	memset(&ev, 0, sizeof(ev));
4539 	memcpy(ev.uuid, uuid, 16);
4540 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4541 
4542 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4543 				  &ev, sizeof(ev),
4544 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4545 }
4546 
/* Initializer for an exp_features[] entry, pairing a feature UUID with
 * its Set Experimental Feature handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4552 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Currently it acts as a global "reset": with no controller index it
 * turns the debug feature off (when built in) and always replies with
 * an all-zero UUID and zero flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		/* Notify only if the debug setting actually flipped */
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* Opt the caller in to future feature-changed events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4579 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set Experimental Feature handler for the debug UUID.
 *
 * Toggles the global bt_dbg setting; must be sent without a controller
 * index and with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is guaranteed NULL here (checked above), so the event is
	 * broadcast without a controller index.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4626 
/* Set Experimental Feature handler for the mesh UUID.
 *
 * Toggles HCI_MESH_EXPERIMENTAL on @hdev; disabling it also clears the
 * operational HCI_MESH flag. Requires a controller index and a single
 * boolean parameter octet.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Disabling the experiment also turns mesh itself off */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Broadcast the change only when the flag actually flipped */
	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4677 
/* Set Experimental Feature handler for the quality report UUID.
 *
 * Enables/disables controller quality reporting via either the driver's
 * set_quality_report hook (preferred) or the AOSP vendor extension.
 * Runs under hci_req_sync_lock() because the enable path issues HCI
 * traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Need at least one of the two backends to act on the request */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver hook takes precedence over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Flag is updated only after the backend succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4751 
/* Set Experimental Feature handler for the offload codecs UUID.
 *
 * Toggles HCI_OFFLOAD_CODECS_ENABLED on @hdev. Offload is only
 * meaningful when the driver provides get_data_path_id.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Codec offload requires the driver's data path hook */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4809 
/* Set Experimental Feature handler for the LE simultaneous roles UUID.
 *
 * Toggles HCI_LE_SIMULTANEOUS_ROLES on @hdev; only supported when the
 * controller's LE states allow acting as central and peripheral at the
 * same time.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	/* Controller must support the required LE state combinations */
	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4867 
#ifdef CONFIG_BT_LE
/* Set Experimental Feature handler for the ISO socket UUID.
 *
 * Registers (iso_init) or unregisters (iso_exit) the ISO socket
 * protocol globally; must be sent without a controller index.
 *
 * Fix: use the !!cp->param[0] idiom for the boolean conversion, matching
 * every other Set Experimental Feature handler in this file.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change if the (un)registration succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here (checked above), so this is a global event */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
4918 
/* Dispatch table for MGMT_OP_SET_EXP_FEATURE: maps a feature UUID to
 * its handler. Terminated by a NULL-uuid sentinel entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4939 
4940 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4941 			   void *data, u16 data_len)
4942 {
4943 	struct mgmt_cp_set_exp_feature *cp = data;
4944 	size_t i = 0;
4945 
4946 	bt_dev_dbg(hdev, "sock %p", sk);
4947 
4948 	for (i = 0; exp_features[i].uuid; i++) {
4949 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4950 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4951 	}
4952 
4953 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4954 			       MGMT_OP_SET_EXP_FEATURE,
4955 			       MGMT_STATUS_NOT_SUPPORTED);
4956 }
4957 
4958 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4959 			    u16 data_len)
4960 {
4961 	struct mgmt_cp_get_device_flags *cp = data;
4962 	struct mgmt_rp_get_device_flags rp;
4963 	struct bdaddr_list_with_flags *br_params;
4964 	struct hci_conn_params *params;
4965 	u32 supported_flags;
4966 	u32 current_flags = 0;
4967 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4968 
4969 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4970 		   &cp->addr.bdaddr, cp->addr.type);
4971 
4972 	hci_dev_lock(hdev);
4973 
4974 	supported_flags = hdev->conn_flags;
4975 
4976 	memset(&rp, 0, sizeof(rp));
4977 
4978 	if (cp->addr.type == BDADDR_BREDR) {
4979 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4980 							      &cp->addr.bdaddr,
4981 							      cp->addr.type);
4982 		if (!br_params)
4983 			goto done;
4984 
4985 		current_flags = br_params->flags;
4986 	} else {
4987 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4988 						le_addr_type(cp->addr.type));
4989 		if (!params)
4990 			goto done;
4991 
4992 		current_flags = params->flags;
4993 	}
4994 
4995 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4996 	rp.addr.type = cp->addr.type;
4997 	rp.supported_flags = cpu_to_le32(supported_flags);
4998 	rp.current_flags = cpu_to_le32(current_flags);
4999 
5000 	status = MGMT_STATUS_SUCCESS;
5001 
5002 done:
5003 	hci_dev_unlock(hdev);
5004 
5005 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5006 				&rp, sizeof(rp));
5007 }
5008 
5009 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5010 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5011 				 u32 supported_flags, u32 current_flags)
5012 {
5013 	struct mgmt_ev_device_flags_changed ev;
5014 
5015 	bacpy(&ev.addr.bdaddr, bdaddr);
5016 	ev.addr.type = bdaddr_type;
5017 	ev.supported_flags = cpu_to_le32(supported_flags);
5018 	ev.current_flags = cpu_to_le32(current_flags);
5019 
5020 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5021 }
5022 
5023 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5024 			    u16 len)
5025 {
5026 	struct mgmt_cp_set_device_flags *cp = data;
5027 	struct bdaddr_list_with_flags *br_params;
5028 	struct hci_conn_params *params;
5029 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5030 	u32 supported_flags;
5031 	u32 current_flags = __le32_to_cpu(cp->current_flags);
5032 
5033 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5034 		   &cp->addr.bdaddr, cp->addr.type, current_flags);
5035 
5036 	// We should take hci_dev_lock() early, I think.. conn_flags can change
5037 	supported_flags = hdev->conn_flags;
5038 
5039 	if ((supported_flags | current_flags) != supported_flags) {
5040 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5041 			    current_flags, supported_flags);
5042 		goto done;
5043 	}
5044 
5045 	hci_dev_lock(hdev);
5046 
5047 	if (cp->addr.type == BDADDR_BREDR) {
5048 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5049 							      &cp->addr.bdaddr,
5050 							      cp->addr.type);
5051 
5052 		if (br_params) {
5053 			br_params->flags = current_flags;
5054 			status = MGMT_STATUS_SUCCESS;
5055 		} else {
5056 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5057 				    &cp->addr.bdaddr, cp->addr.type);
5058 		}
5059 
5060 		goto unlock;
5061 	}
5062 
5063 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5064 					le_addr_type(cp->addr.type));
5065 	if (!params) {
5066 		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5067 			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5068 		goto unlock;
5069 	}
5070 
5071 	supported_flags = hdev->conn_flags;
5072 
5073 	if ((supported_flags | current_flags) != supported_flags) {
5074 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5075 			    current_flags, supported_flags);
5076 		goto unlock;
5077 	}
5078 
5079 	WRITE_ONCE(params->flags, current_flags);
5080 	status = MGMT_STATUS_SUCCESS;
5081 
5082 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5083 	 * has been set.
5084 	 */
5085 	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5086 		hci_update_passive_scan(hdev);
5087 
5088 unlock:
5089 	hci_dev_unlock(hdev);
5090 
5091 done:
5092 	if (status == MGMT_STATUS_SUCCESS)
5093 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5094 				     supported_flags, current_flags);
5095 
5096 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5097 				 &cp->addr, sizeof(cp->addr));
5098 }
5099 
5100 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5101 				   u16 handle)
5102 {
5103 	struct mgmt_ev_adv_monitor_added ev;
5104 
5105 	ev.monitor_handle = cpu_to_le16(handle);
5106 
5107 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5108 }
5109 
/* Broadcast an Advertisement Monitor Removed event for @handle.
 *
 * If a Remove Adv Monitor command is pending for a specific handle
 * (non-zero monitor_handle), its requester is skipped: that socket gets
 * the command response instead of the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* monitor_handle == 0 means "remove all"; in that case the
		 * requester still receives per-handle events.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5129 
5130 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5131 				 void *data, u16 len)
5132 {
5133 	struct adv_monitor *monitor = NULL;
5134 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5135 	int handle, err;
5136 	size_t rp_size = 0;
5137 	__u32 supported = 0;
5138 	__u32 enabled = 0;
5139 	__u16 num_handles = 0;
5140 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5141 
5142 	BT_DBG("request for %s", hdev->name);
5143 
5144 	hci_dev_lock(hdev);
5145 
5146 	if (msft_monitor_supported(hdev))
5147 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5148 
5149 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5150 		handles[num_handles++] = monitor->handle;
5151 
5152 	hci_dev_unlock(hdev);
5153 
5154 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5155 	rp = kmalloc(rp_size, GFP_KERNEL);
5156 	if (!rp)
5157 		return -ENOMEM;
5158 
5159 	/* All supported features are currently enabled */
5160 	enabled = supported;
5161 
5162 	rp->supported_features = cpu_to_le32(supported);
5163 	rp->enabled_features = cpu_to_le32(enabled);
5164 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5165 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5166 	rp->num_handles = cpu_to_le16(num_handles);
5167 	if (num_handles)
5168 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5169 
5170 	err = mgmt_cmd_complete(sk, hdev->id,
5171 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5172 				MGMT_STATUS_SUCCESS, rp, rp_size);
5173 
5174 	kfree(rp);
5175 
5176 	return err;
5177 }
5178 
/* Completion callback for the Add Adv Patterns Monitor sync request.
 *
 * On success, announces the new monitor, bumps the monitor count, marks
 * the monitor registered, and refreshes passive scanning. Always replies
 * to the pending command with the translated status.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		/* The offload path may have already advanced the state */
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5206 
5207 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5208 {
5209 	struct mgmt_pending_cmd *cmd = data;
5210 	struct adv_monitor *monitor = cmd->user_data;
5211 
5212 	return hci_add_adv_monitor(hdev, monitor);
5213 }
5214 
/* Common tail for the Add Adv Patterns Monitor command variants.
 *
 * Takes ownership of @m: on any failure path (including a non-zero
 * incoming @status from the caller's parsing) the monitor is freed here
 * via hci_free_adv_monitor(); on success ownership passes to the queued
 * sync request, whose completion handler replies to the command.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed parsing; just free @m and report */
	if (status)
		goto unlock;

	/* Serialize against other monitor/LE state changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	/* Reply is deferred to the completion callback */
	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5262 
5263 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5264 				   struct mgmt_adv_rssi_thresholds *rssi)
5265 {
5266 	if (rssi) {
5267 		m->rssi.low_threshold = rssi->low_threshold;
5268 		m->rssi.low_threshold_timeout =
5269 		    __le16_to_cpu(rssi->low_threshold_timeout);
5270 		m->rssi.high_threshold = rssi->high_threshold;
5271 		m->rssi.high_threshold_timeout =
5272 		    __le16_to_cpu(rssi->high_threshold_timeout);
5273 		m->rssi.sampling_period = rssi->sampling_period;
5274 	} else {
5275 		/* Default values. These numbers are the least constricting
5276 		 * parameters for MSFT API to work, so it behaves as if there
5277 		 * are no rssi parameter to consider. May need to be changed
5278 		 * if other API are to be supported.
5279 		 */
5280 		m->rssi.low_threshold = -127;
5281 		m->rssi.low_threshold_timeout = 60;
5282 		m->rssi.high_threshold = -127;
5283 		m->rssi.high_threshold_timeout = 0;
5284 		m->rssi.sampling_period = 0;
5285 	}
5286 }
5287 
/* Validate @patterns and attach copies to @m->patterns.
 *
 * Returns an MGMT status code. On a mid-loop failure, patterns already
 * added remain on the list; the caller is responsible for freeing the
 * whole monitor (hci_free_adv_monitor releases the list).
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		/* The pattern must fit entirely within extended AD data */
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		/* Only the first p->length bytes of p->value are valid */
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5318 
5319 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5320 				    void *data, u16 len)
5321 {
5322 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5323 	struct adv_monitor *m = NULL;
5324 	u8 status = MGMT_STATUS_SUCCESS;
5325 	size_t expected_size = sizeof(*cp);
5326 
5327 	BT_DBG("request for %s", hdev->name);
5328 
5329 	if (len <= sizeof(*cp)) {
5330 		status = MGMT_STATUS_INVALID_PARAMS;
5331 		goto done;
5332 	}
5333 
5334 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5335 	if (len != expected_size) {
5336 		status = MGMT_STATUS_INVALID_PARAMS;
5337 		goto done;
5338 	}
5339 
5340 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5341 	if (!m) {
5342 		status = MGMT_STATUS_NO_RESOURCES;
5343 		goto done;
5344 	}
5345 
5346 	INIT_LIST_HEAD(&m->patterns);
5347 
5348 	parse_adv_monitor_rssi(m, NULL);
5349 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5350 
5351 done:
5352 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5353 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5354 }
5355 
5356 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5357 					 void *data, u16 len)
5358 {
5359 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5360 	struct adv_monitor *m = NULL;
5361 	u8 status = MGMT_STATUS_SUCCESS;
5362 	size_t expected_size = sizeof(*cp);
5363 
5364 	BT_DBG("request for %s", hdev->name);
5365 
5366 	if (len <= sizeof(*cp)) {
5367 		status = MGMT_STATUS_INVALID_PARAMS;
5368 		goto done;
5369 	}
5370 
5371 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5372 	if (len != expected_size) {
5373 		status = MGMT_STATUS_INVALID_PARAMS;
5374 		goto done;
5375 	}
5376 
5377 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5378 	if (!m) {
5379 		status = MGMT_STATUS_NO_RESOURCES;
5380 		goto done;
5381 	}
5382 
5383 	INIT_LIST_HEAD(&m->patterns);
5384 
5385 	parse_adv_monitor_rssi(m, &cp->rssi);
5386 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5387 
5388 done:
5389 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5390 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5391 }
5392 
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR.
 *
 * Runs once the cmd_sync removal work has finished: sends the mgmt
 * response echoing the requested handle and drops the pending command.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	/* Nothing to report if the work was cancelled or the pending
	 * command has already been torn down.
	 */
	if (status == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	/* Echo the handle back exactly as it arrived (still little-endian) */
	rp.monitor_handle = cp->monitor_handle;

	/* Monitor set changed; re-evaluate passive scanning on success */
	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5421 
5422 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5423 {
5424 	struct mgmt_pending_cmd *cmd = data;
5425 
5426 	if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
5427 		return -ECANCELED;
5428 
5429 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5430 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5431 
5432 	if (!handle)
5433 		return hci_remove_all_adv_monitor(hdev);
5434 
5435 	return hci_remove_single_adv_monitor(hdev, handle);
5436 }
5437 
/* MGMT_OP_REMOVE_ADV_MONITOR handler.
 *
 * Queues the removal on the cmd_sync context; the reply is sent from
 * mgmt_remove_adv_monitor_complete().  Only error paths reply directly.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Refuse while another command that touches the LE/monitor state
	 * is still in flight.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Submission failed: the completion callback will never
		 * run, so drop the pending command here.
		 */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5483 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Translates the controller reply skb (stashed on the pending command by
 * read_local_oob_data_sync()) into the mgmt response.  When Secure
 * Connections is not enabled only the P-192 hash/randomizer is returned
 * and the response is shortened accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* No error from cmd_sync itself: derive the status from the skb
	 * (missing, error-encoded, or the HCI status in its first byte).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Truncate the response: no P-256 values to report */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* skb may be NULL or an ERR_PTR; only free a real buffer */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5550 
5551 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5552 {
5553 	struct mgmt_pending_cmd *cmd = data;
5554 
5555 	if (bredr_sc_enabled(hdev))
5556 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5557 	else
5558 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5559 
5560 	if (IS_ERR(cmd->skb))
5561 		return PTR_ERR(cmd->skb);
5562 	else
5563 		return 0;
5564 }
5565 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler.
 *
 * Requires a powered, SSP-capable controller.  The actual HCI exchange
 * happens on the cmd_sync context; the reply is sent from
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		/* Queueing failed: reply here and free the command, as the
		 * completion callback will never run.
		 */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5607 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.
 *
 * Accepts either the short form (P-192 hash/randomizer only) or the
 * extended form (P-192 and P-256), distinguished by the command length.
 * Zero-valued key material selectively disables the corresponding OOB
 * data set rather than storing zeros.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Short form: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known command size matches */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5715 
5716 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5717 				  void *data, u16 len)
5718 {
5719 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5720 	u8 status;
5721 	int err;
5722 
5723 	bt_dev_dbg(hdev, "sock %p", sk);
5724 
5725 	if (cp->addr.type != BDADDR_BREDR)
5726 		return mgmt_cmd_complete(sk, hdev->id,
5727 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5728 					 MGMT_STATUS_INVALID_PARAMS,
5729 					 &cp->addr, sizeof(cp->addr));
5730 
5731 	hci_dev_lock(hdev);
5732 
5733 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5734 		hci_remote_oob_data_clear(hdev);
5735 		status = MGMT_STATUS_SUCCESS;
5736 		goto done;
5737 	}
5738 
5739 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5740 	if (err < 0)
5741 		status = MGMT_STATUS_INVALID_PARAMS;
5742 	else
5743 		status = MGMT_STATUS_SUCCESS;
5744 
5745 done:
5746 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5747 				status, &cp->addr, sizeof(cp->addr));
5748 
5749 	hci_dev_unlock(hdev);
5750 	return err;
5751 }
5752 
5753 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5754 				    uint8_t *mgmt_status)
5755 {
5756 	switch (type) {
5757 	case DISCOV_TYPE_LE:
5758 		*mgmt_status = mgmt_le_support(hdev);
5759 		if (*mgmt_status)
5760 			return false;
5761 		break;
5762 	case DISCOV_TYPE_INTERLEAVED:
5763 		*mgmt_status = mgmt_le_support(hdev);
5764 		if (*mgmt_status)
5765 			return false;
5766 		fallthrough;
5767 	case DISCOV_TYPE_BREDR:
5768 		*mgmt_status = mgmt_bredr_support(hdev);
5769 		if (*mgmt_status)
5770 			return false;
5771 		break;
5772 	default:
5773 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5774 		return false;
5775 	}
5776 
5777 	return true;
5778 }
5779 
/* Completion callback shared by the three start-discovery opcodes.
 *
 * Replies to the originating socket and moves the discovery state to
 * FINDING on success or back to STOPPED on error.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	/* Only proceed if this command is still the pending one for any of
	 * the discovery opcodes.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	/* The response payload is the single type byte from the request */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5801 
/* cmd_sync worker: start discovery using the parameters already staged
 * in hdev->discovery by the mgmt handler.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5806 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects the opcode).
 *
 * Validates state and discovery type, stages the parameters in
 * hdev->discovery and queues the work; the reply is sent from
 * start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Busy while discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5877 
/* MGMT_OP_START_DISCOVERY handler: plain (non-limited) discovery */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5884 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery variant */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5892 
/* MGMT_OP_START_SERVICE_DISCOVERY handler.
 *
 * Like start_discovery_internal() but additionally installs a result
 * filter: an RSSI threshold and an optional list of 128-bit UUIDs (16
 * bytes each) appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that the total length fits in u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Busy while discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6004 
/* Completion callback for MGMT_OP_STOP_DISCOVERY.
 *
 * Replies to the originating socket and marks discovery STOPPED on
 * success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Skip if cancelled or the pending command has already gone away */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The response payload is the single type byte from the request */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6022 
/* cmd_sync worker: stop the currently running discovery */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6027 
/* MGMT_OP_STOP_DISCOVERY handler.
 *
 * The requested type must match the discovery currently in progress;
 * the reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type to stop must match the running discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6072 
6073 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6074 			u16 len)
6075 {
6076 	struct mgmt_cp_confirm_name *cp = data;
6077 	struct inquiry_entry *e;
6078 	int err;
6079 
6080 	bt_dev_dbg(hdev, "sock %p", sk);
6081 
6082 	hci_dev_lock(hdev);
6083 
6084 	if (!hci_discovery_active(hdev)) {
6085 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6086 					MGMT_STATUS_FAILED, &cp->addr,
6087 					sizeof(cp->addr));
6088 		goto failed;
6089 	}
6090 
6091 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6092 	if (!e) {
6093 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6094 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6095 					sizeof(cp->addr));
6096 		goto failed;
6097 	}
6098 
6099 	if (cp->name_known) {
6100 		e->name_state = NAME_KNOWN;
6101 		list_del(&e->list);
6102 	} else {
6103 		e->name_state = NAME_NEEDED;
6104 		hci_inquiry_cache_update_resolve(hdev, e);
6105 	}
6106 
6107 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6108 				&cp->addr, sizeof(cp->addr));
6109 
6110 failed:
6111 	hci_dev_unlock(hdev);
6112 	return err;
6113 }
6114 
6115 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6116 			u16 len)
6117 {
6118 	struct mgmt_cp_block_device *cp = data;
6119 	u8 status;
6120 	int err;
6121 
6122 	bt_dev_dbg(hdev, "sock %p", sk);
6123 
6124 	if (!bdaddr_type_is_valid(cp->addr.type))
6125 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6126 					 MGMT_STATUS_INVALID_PARAMS,
6127 					 &cp->addr, sizeof(cp->addr));
6128 
6129 	hci_dev_lock(hdev);
6130 
6131 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6132 				  cp->addr.type);
6133 	if (err < 0) {
6134 		status = MGMT_STATUS_FAILED;
6135 		goto done;
6136 	}
6137 
6138 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6139 		   sk);
6140 	status = MGMT_STATUS_SUCCESS;
6141 
6142 done:
6143 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6144 				&cp->addr, sizeof(cp->addr));
6145 
6146 	hci_dev_unlock(hdev);
6147 
6148 	return err;
6149 }
6150 
6151 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6152 			  u16 len)
6153 {
6154 	struct mgmt_cp_unblock_device *cp = data;
6155 	u8 status;
6156 	int err;
6157 
6158 	bt_dev_dbg(hdev, "sock %p", sk);
6159 
6160 	if (!bdaddr_type_is_valid(cp->addr.type))
6161 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6162 					 MGMT_STATUS_INVALID_PARAMS,
6163 					 &cp->addr, sizeof(cp->addr));
6164 
6165 	hci_dev_lock(hdev);
6166 
6167 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6168 				  cp->addr.type);
6169 	if (err < 0) {
6170 		status = MGMT_STATUS_INVALID_PARAMS;
6171 		goto done;
6172 	}
6173 
6174 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6175 		   sk);
6176 	status = MGMT_STATUS_SUCCESS;
6177 
6178 done:
6179 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6180 				&cp->addr, sizeof(cp->addr));
6181 
6182 	hci_dev_unlock(hdev);
6183 
6184 	return err;
6185 }
6186 
/* cmd_sync worker: refresh the EIR data after a Device ID change */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6191 
/* MGMT_OP_SET_DEVICE_ID handler.
 *
 * Stores the Device ID record (source, vendor, product, version) and
 * schedules an EIR refresh so the new record becomes visible.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* EIR refresh is best effort; a queueing failure is not reported */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6223 
/* Log the outcome of re-arming an advertising instance */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6231 
/* Completion callback for MGMT_OP_SET_ADVERTISING.
 *
 * Syncs the HCI_ADVERTISING flag with the controller state, replies to
 * all pending SET_ADVERTISING commands, emits New Settings, and — if
 * "Set Advertising" was just turned off — re-arms instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	/* On failure just report the status to every pending command */
	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state into the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* With no current instance, fall back to the first configured one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6279 
/* cmd_sync worker for MGMT_OP_SET_ADVERTISING.
 *
 * cp->val: 0x00 disables advertising, 0x01 enables it, 0x02 enables it
 * as connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6313 
/* MGMT_OP_SET_ADVERTISING handler.
 *
 * Accepts 0x00 (off), 0x01 (on) and 0x02 (on + connectable).  When no
 * HCI traffic is needed (powered off, state already matches, mesh mode,
 * active LE connections or active scanning) only the flags are toggled
 * and the response is sent immediately; otherwise the change is queued
 * on the cmd_sync context.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if a flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while a conflicting command is still in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6398 
/* MGMT_OP_SET_STATIC_ADDRESS handler.
 *
 * Stores the LE static random address; only permitted while the
 * controller is powered off.  BDADDR_ANY clears the address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6442 
6443 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6444 			   void *data, u16 len)
6445 {
6446 	struct mgmt_cp_set_scan_params *cp = data;
6447 	__u16 interval, window;
6448 	int err;
6449 
6450 	bt_dev_dbg(hdev, "sock %p", sk);
6451 
6452 	if (!lmp_le_capable(hdev))
6453 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6454 				       MGMT_STATUS_NOT_SUPPORTED);
6455 
6456 	interval = __le16_to_cpu(cp->interval);
6457 
6458 	if (interval < 0x0004 || interval > 0x4000)
6459 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6460 				       MGMT_STATUS_INVALID_PARAMS);
6461 
6462 	window = __le16_to_cpu(cp->window);
6463 
6464 	if (window < 0x0004 || window > 0x4000)
6465 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6466 				       MGMT_STATUS_INVALID_PARAMS);
6467 
6468 	if (window > interval)
6469 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6470 				       MGMT_STATUS_INVALID_PARAMS);
6471 
6472 	hci_dev_lock(hdev);
6473 
6474 	hdev->le_scan_interval = interval;
6475 	hdev->le_scan_window = window;
6476 
6477 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6478 				NULL, 0);
6479 
6480 	/* If background scan is running, restart it so new parameters are
6481 	 * loaded.
6482 	 */
6483 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6484 	    hdev->discovery.state == DISCOVERY_STOPPED)
6485 		hci_update_passive_scan(hdev);
6486 
6487 	hci_dev_unlock(hdev);
6488 
6489 	return err;
6490 }
6491 
6492 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6493 {
6494 	struct mgmt_pending_cmd *cmd = data;
6495 
6496 	bt_dev_dbg(hdev, "err %d", err);
6497 
6498 	if (err) {
6499 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6500 				mgmt_status(err));
6501 	} else {
6502 		struct mgmt_mode *cp = cmd->param;
6503 
6504 		if (cp->val)
6505 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6506 		else
6507 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6508 
6509 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6510 		new_settings(hdev, cmd->sk);
6511 	}
6512 
6513 	mgmt_pending_free(cmd);
6514 }
6515 
6516 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6517 {
6518 	struct mgmt_pending_cmd *cmd = data;
6519 	struct mgmt_mode *cp = cmd->param;
6520 
6521 	return hci_write_fast_connectable_sync(hdev, cp->val);
6522 }
6523 
/* Handle MGMT_OP_SET_FAST_CONNECTABLE. The actual controller update is
 * queued through hci_cmd_sync and finished by fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR to be enabled and a controller of at least
	 * Bluetooth version 1.2.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are accepted. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; no HCI traffic is
	 * needed until the controller is powered on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	/* On queueing failure report the error here; otherwise the
	 * completion callback owns the reply and frees cmd.
	 */
	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6579 
6580 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6581 {
6582 	struct mgmt_pending_cmd *cmd = data;
6583 
6584 	bt_dev_dbg(hdev, "err %d", err);
6585 
6586 	if (err) {
6587 		u8 mgmt_err = mgmt_status(err);
6588 
6589 		/* We need to restore the flag if related HCI commands
6590 		 * failed.
6591 		 */
6592 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6593 
6594 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6595 	} else {
6596 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6597 		new_settings(hdev, cmd->sk);
6598 	}
6599 
6600 	mgmt_pending_free(cmd);
6601 }
6602 
6603 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6604 {
6605 	int status;
6606 
6607 	status = hci_write_fast_connectable_sync(hdev, false);
6608 
6609 	if (!status)
6610 		status = hci_update_scan_sync(hdev);
6611 
6612 	/* Since only the advertising data flags will change, there
6613 	 * is no need to update the scan response data.
6614 	 */
6615 	if (!status)
6616 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6617 
6618 	return status;
6619 }
6620 
/* Handle MGMT_OP_SET_BREDR: enable or disable the BR/EDR transport on a
 * dual-mode controller. The controller-side work runs via hci_cmd_sync
 * and finishes in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only dual-mode (BR/EDR + LE) controllers can toggle BR/EDR. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must remain enabled while BR/EDR is being toggled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only flags are updated; the controller is
	 * configured accordingly on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR also clears settings that only
			 * apply to the BR/EDR transport.
			 */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6720 
/* Completion callback for MGMT_OP_SET_SECURE_CONN: on success, update
 * the SC flags to match the requested mode and notify user space.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = SC enabled, 0x02 = SC only mode */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6758 
6759 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6760 {
6761 	struct mgmt_pending_cmd *cmd = data;
6762 	struct mgmt_mode *cp = cmd->param;
6763 	u8 val = !!cp->val;
6764 
6765 	/* Force write of val */
6766 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6767 
6768 	return hci_write_sc_support_sync(hdev, val);
6769 }
6770 
/* Handle MGMT_OP_SET_SECURE_CONN: val 0x00 disables SC, 0x01 enables it
 * and 0x02 enables SC-only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC needs either controller support or an LE-enabled host. */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC capable BR/EDR controller, SSP must be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When no HCI traffic is needed, just toggle the flags and reply
	 * directly to user space.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just acknowledge the current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6851 
/* Handle MGMT_OP_SET_DEBUG_KEYS: 0x00 = off, 0x01 = keep debug keys,
 * 0x02 = additionally use SSP debug mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are kept in the key store. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Tell a powered controller with SSP enabled about the new SSP
	 * debug mode right away.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6898 
/* Handle MGMT_OP_SET_PRIVACY: 0x00 disables privacy, 0x01 enables it
 * and 0x02 enables limited privacy. Only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		/* Store the new local IRK and mark the current RPA as
		 * expired so a fresh one gets generated.
		 */
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		/* Disabling privacy wipes the local IRK. */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6955 
6956 static bool irk_is_valid(struct mgmt_irk_info *irk)
6957 {
6958 	switch (irk->addr.type) {
6959 	case BDADDR_LE_PUBLIC:
6960 		return true;
6961 
6962 	case BDADDR_LE_RANDOM:
6963 		/* Two most significant bits shall be set */
6964 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6965 			return false;
6966 		return true;
6967 	}
6968 
6969 	return false;
6970 }
6971 
/* Handle MGMT_OP_LOAD_IRKS: replace the entire IRK store with the list
 * supplied by user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the expected message length within u16. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the advertised entry count. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries up front so the store is only cleared
	 * once the whole list is known to be acceptable.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Blocked keys are skipped with a warning instead of
		 * failing the whole load.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7042 
7043 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7044 {
7045 	if (key->initiator != 0x00 && key->initiator != 0x01)
7046 		return false;
7047 
7048 	switch (key->addr.type) {
7049 	case BDADDR_LE_PUBLIC:
7050 		return true;
7051 
7052 	case BDADDR_LE_RANDOM:
7053 		/* Two most significant bits shall be set */
7054 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7055 			return false;
7056 		return true;
7057 	}
7058 
7059 	return false;
7060 }
7061 
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the entire LTK store with
 * the list supplied by user space. Invalid, blocked and debug keys are
 * skipped rather than failing the whole command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the expected message length within u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the advertised entry count. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP type and authentication
		 * level.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys deliberately fall through to the
			 * default and are never added to the store; the
			 * assignments above are discarded.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7154 
7155 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7156 {
7157 	struct mgmt_pending_cmd *cmd = data;
7158 	struct hci_conn *conn = cmd->user_data;
7159 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7160 	struct mgmt_rp_get_conn_info rp;
7161 	u8 status;
7162 
7163 	bt_dev_dbg(hdev, "err %d", err);
7164 
7165 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7166 
7167 	status = mgmt_status(err);
7168 	if (status == MGMT_STATUS_SUCCESS) {
7169 		rp.rssi = conn->rssi;
7170 		rp.tx_power = conn->tx_power;
7171 		rp.max_tx_power = conn->max_tx_power;
7172 	} else {
7173 		rp.rssi = HCI_RSSI_INVALID;
7174 		rp.tx_power = HCI_TX_POWER_INVALID;
7175 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7176 	}
7177 
7178 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7179 			  &rp, sizeof(rp));
7180 
7181 	mgmt_pending_free(cmd);
7182 }
7183 
/* hci_cmd_sync work for MGMT_OP_GET_CONN_INFO: refresh RSSI and TX
 * power readings for the connection named in the command parameters.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Remember the connection for the completion callback. */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7221 
/* Handle MGMT_OP_GET_CONN_INFO: reply with cached RSSI/TX power values
 * when they are fresh enough, otherwise queue a controller query.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address
	 * type.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			/* get_conn_info_complete() sends the reply and
			 * frees cmd once the queries are done.
			 */
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7312 
/* Completion callback for MGMT_OP_GET_CLOCK_INFO: report the local and,
 * when a connection was involved, the piconet clock to user space.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure the clock fields stay zeroed. */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when a piconet clock was requested. */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7343 
7344 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7345 {
7346 	struct mgmt_pending_cmd *cmd = data;
7347 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7348 	struct hci_cp_read_clock hci_cp;
7349 	struct hci_conn *conn;
7350 
7351 	memset(&hci_cp, 0, sizeof(hci_cp));
7352 	hci_read_clock_sync(hdev, &hci_cp);
7353 
7354 	/* Make sure connection still exists */
7355 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7356 	if (!conn || conn->state != BT_CONNECTED)
7357 		return MGMT_STATUS_NOT_CONNECTED;
7358 
7359 	cmd->user_data = conn;
7360 	hci_cp.handle = cpu_to_le16(conn->handle);
7361 	hci_cp.which = 0x01; /* Piconet clock */
7362 
7363 	return hci_read_clock_sync(hdev, &hci_cp);
7364 }
7365 
/* Handle MGMT_OP_GET_CLOCK_INFO: queue a clock read for the local
 * controller and optionally for one BR/EDR connection.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address asks for the piconet clock of that
	 * connection; BDADDR_ANY asks for the local clock only.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7429 
7430 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7431 {
7432 	struct hci_conn *conn;
7433 
7434 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7435 	if (!conn)
7436 		return false;
7437 
7438 	if (conn->dst_type != type)
7439 		return false;
7440 
7441 	if (conn->state != BT_CONNECTED)
7442 		return false;
7443 
7444 	return true;
7445 }
7446 
/* This function requires the caller holds hdev->lock */
/* Update (creating if needed) the connection parameters entry for the
 * given address and move it onto the pending list that matches the new
 * auto_connect policy. Returns 0 on success or -EIO if the entry could
 * not be added.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pending list the entry is on before
	 * re-filing it below.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if one is not already
		 * established.
		 */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7491 
7492 static void device_added(struct sock *sk, struct hci_dev *hdev,
7493 			 bdaddr_t *bdaddr, u8 type, u8 action)
7494 {
7495 	struct mgmt_ev_device_added ev;
7496 
7497 	bacpy(&ev.addr.bdaddr, bdaddr);
7498 	ev.addr.type = type;
7499 	ev.action = action;
7500 
7501 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7502 }
7503 
/* Completion callback for MGMT_OP_ADD_DEVICE: on success, emit Device
 * Added plus a Device Flags Changed event, then answer the pending
 * command and release it.
 */
static void add_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_device *cp = cmd->param;

	if (!err) {
		struct hci_conn_params *params;

		/* May be NULL; the flags argument below falls back to 0 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
			     cp->action);
		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
				     cp->addr.type, hdev->conn_flags,
				     params ? params->flags : 0);
	}

	/* Reply with the mapped status (success or failure) and free the
	 * pending command entry.
	 */
	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
	mgmt_pending_free(cmd);
}
7526 
/* hci_cmd_sync work for Add Device: refresh the passive scan so the new
 * entry takes effect. The pending command in @data is handled by
 * add_device_complete().
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7531 
/* Handler for MGMT_OP_ADD_DEVICE: for BR/EDR, add the device to the
 * accept list; for LE, create/update connection parameters with the
 * requested auto-connect action and schedule a passive scan update.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address type must be valid and the address must not be the
	 * BDADDR_ANY placeholder.
	 */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined (see the auto_conn mapping
	 * further below).
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		/* BR/EDR completes synchronously; skip the cmd_sync queue */
		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Queue the passive scan update; events and the command reply are
	 * generated from add_device_complete().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7647 
7648 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7649 			   bdaddr_t *bdaddr, u8 type)
7650 {
7651 	struct mgmt_ev_device_removed ev;
7652 
7653 	bacpy(&ev.addr.bdaddr, bdaddr);
7654 	ev.addr.type = type;
7655 
7656 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7657 }
7658 
/* hci_cmd_sync work for Remove Device: refresh the passive scan so the
 * removed entry no longer influences scanning.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7663 
/* Handler for MGMT_OP_REMOVE_DEVICE: remove one device (non-ANY
 * address) from the accept list or LE conn params, or — for BDADDR_ANY
 * with type 0 — wipe the accept list and all removable LE parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			/* BR/EDR entries live on the accept list only */
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* No passive scan update needed for BR/EDR */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED/EXPLICIT entries were not created via Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect, but
			 * downgrade them so they are no longer auto-connected.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7787 
7788 static int conn_update_sync(struct hci_dev *hdev, void *data)
7789 {
7790 	struct hci_conn_params *params = data;
7791 	struct hci_conn *conn;
7792 
7793 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7794 	if (!conn)
7795 		return -ECANCELED;
7796 
7797 	return hci_le_conn_update_sync(hdev, conn, params);
7798 }
7799 
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace/merge the LE connection
 * parameter table with the list supplied by userspace. Invalid entries
 * are skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps struct_size() below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared entry count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* A multi-entry load starts from a clean slate of disabled entries;
	 * the single-entry case is handled inside the loop below.
	 */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE public/random identity address types are accepted */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7918 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle the "configured by
 * external means" flag; may re-register the index as configured or
 * unconfigured when the overall configuration state changes.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured/unconfigured classification changed, tear the
	 * index down and bring it back up under the new classification.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7974 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: record a public address to be
 * programmed via the driver's set_bdaddr hook; if this completes the
 * configuration, re-register the index as a configured controller.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a way to program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may complete configuration; if so, move the
	 * index to the configured state and power it up for setup.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8026 
/* Completion handler for Read Local OOB Extended Data: translate the
 * controller's OOB reply (legacy or secure-connections format) into a
 * mgmt response with the hash/randomizer values packed as EIR fields.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the request was cancelled or superseded */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive the status from the HCI reply when err itself is clean */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* Failure: reply with no EIR data at all */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy pairing: only the P-192 hash/randomizer exist */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev field + two 18-byte EIR entries */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 values, plus P-192 unless the
		 * controller is in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* Error path: send the reply without any EIR payload */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* On success also broadcast the new OOB data to interested
	 * sockets (excluding the requester).
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8150 
8151 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8152 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8153 {
8154 	struct mgmt_pending_cmd *cmd;
8155 	int err;
8156 
8157 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8158 			       cp, sizeof(*cp));
8159 	if (!cmd)
8160 		return -ENOMEM;
8161 
8162 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8163 				 read_local_oob_ext_data_complete);
8164 
8165 	if (err < 0) {
8166 		mgmt_pending_remove(cmd);
8167 		return err;
8168 	}
8169 
8170 	return 0;
8171 }
8172 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return local OOB pairing
 * data as EIR fields. BR/EDR data is fetched asynchronously from the
 * controller; LE data is assembled here from local state.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested transport and compute the
	 * worst-case EIR length for the reply allocation.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: build the actual EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Needs data from the controller; reply comes via
			 * the async completion on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: static random
		 * (addr[6] = 0x01) or public (addr[6] = 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 when advertising (peripheral preferred),
		 * 0x01 otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8333 
8334 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8335 {
8336 	u32 flags = 0;
8337 
8338 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8339 	flags |= MGMT_ADV_FLAG_DISCOV;
8340 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8341 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8342 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8343 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8344 	flags |= MGMT_ADV_PARAM_DURATION;
8345 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8346 	flags |= MGMT_ADV_PARAM_INTERVALS;
8347 	flags |= MGMT_ADV_PARAM_TX_POWER;
8348 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8349 
8350 	/* In extended adv TX_POWER returned from Set Adv Param
8351 	 * will be always valid.
8352 	 */
8353 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8354 		flags |= MGMT_ADV_FLAG_TX_POWER;
8355 
8356 	if (ext_adv_capable(hdev)) {
8357 		flags |= MGMT_ADV_FLAG_SEC_1M;
8358 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8359 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8360 
8361 		if (le_2m_capable(hdev))
8362 			flags |= MGMT_ADV_FLAG_SEC_2M;
8363 
8364 		if (le_coded_capable(hdev))
8365 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8366 	}
8367 
8368 	return flags;
8369 }
8370 
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, size limits and the list of externally visible instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per instance; trimmed below for instances
	 * that are not reported.
	 */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink count and reply length */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8425 
/* Length the local name occupies when encoded as an EIR field,
 * including the length and type bytes.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8432 
8433 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8434 			   bool is_adv_data)
8435 {
8436 	u8 max_len = max_adv_len(hdev);
8437 
8438 	if (is_adv_data) {
8439 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8440 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8441 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8442 			max_len -= 3;
8443 
8444 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8445 			max_len -= 3;
8446 	} else {
8447 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8448 			max_len -= calculate_name_len(hdev);
8449 
8450 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8451 			max_len -= 4;
8452 	}
8453 
8454 	return max_len;
8455 }
8456 
8457 static bool flags_managed(u32 adv_flags)
8458 {
8459 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8460 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8461 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8462 }
8463 
8464 static bool tx_power_managed(u32 adv_flags)
8465 {
8466 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8467 }
8468 
8469 static bool name_managed(u32 adv_flags)
8470 {
8471 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8472 }
8473 
8474 static bool appearance_managed(u32 adv_flags)
8475 {
8476 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8477 }
8478 
/* Validate user-supplied advertising/scan-response TLV data: it must
 * fit the available space, be well-formed length/type/value fields, and
 * must not contain fields the kernel manages itself.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero-length field is allowed as padding */
		if (!cur_len)
			continue;

		/* Flags are rejected in adv data only when kernel-managed;
		 * in scan response data they are never allowed.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
8523 
8524 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8525 {
8526 	u32 supported_flags, phy_flags;
8527 
8528 	/* The current implementation only supports a subset of the specified
8529 	 * flags. Also need to check mutual exclusiveness of sec flags.
8530 	 */
8531 	supported_flags = get_supported_adv_flags(hdev);
8532 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8533 	if (adv_flags & ~supported_flags ||
8534 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8535 		return false;
8536 
8537 	return true;
8538 }
8539 
8540 static bool adv_busy(struct hci_dev *hdev)
8541 {
8542 	return pending_find(MGMT_OP_SET_LE, hdev);
8543 }
8544 
8545 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8546 			     int err)
8547 {
8548 	struct adv_info *adv, *n;
8549 
8550 	bt_dev_dbg(hdev, "err %d", err);
8551 
8552 	hci_dev_lock(hdev);
8553 
8554 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8555 		u8 instance;
8556 
8557 		if (!adv->pending)
8558 			continue;
8559 
8560 		if (!err) {
8561 			adv->pending = false;
8562 			continue;
8563 		}
8564 
8565 		instance = adv->instance;
8566 
8567 		if (hdev->cur_adv_instance == instance)
8568 			cancel_adv_timeout(hdev);
8569 
8570 		hci_remove_adv_instance(hdev, instance);
8571 		mgmt_advertising_removed(sk, hdev, instance);
8572 	}
8573 
8574 	hci_dev_unlock(hdev);
8575 }
8576 
8577 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8578 {
8579 	struct mgmt_pending_cmd *cmd = data;
8580 	struct mgmt_cp_add_advertising *cp = cmd->param;
8581 	struct mgmt_rp_add_advertising rp;
8582 
8583 	memset(&rp, 0, sizeof(rp));
8584 
8585 	rp.instance = cp->instance;
8586 
8587 	if (err)
8588 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8589 				mgmt_status(err));
8590 	else
8591 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8592 				  mgmt_status(err), &rp, sizeof(rp));
8593 
8594 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8595 
8596 	mgmt_pending_free(cmd);
8597 }
8598 
8599 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8600 {
8601 	struct mgmt_pending_cmd *cmd = data;
8602 	struct mgmt_cp_add_advertising *cp = cmd->param;
8603 
8604 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8605 }
8606 
/* Handler for MGMT_OP_ADD_ADVERTISING: register (or overwrite)
 * advertising instance cp->instance with the given flags, advertising
 * data and scan response data, then schedule it if possible.  Replies
 * inline when no HCI traffic is needed, otherwise the reply is sent
 * from add_advertising_complete() once the queued work finishes.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and limited by the number of
	 * advertising sets the controller supports.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The request must carry exactly adv_data_len + scan_rsp_len
	 * bytes of TLV payload after the fixed header.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Advertising data comes first in cp->data, followed directly by
	 * the scan response data.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* NOTE(review): cp aliases the caller's buffer, while cmd->param
	 * is the copy taken by mgmt_pending_new() above — confirm whether
	 * add_advertising_sync() (which reads cmd->param) is expected to
	 * observe this updated instance value.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8741 
/* Completion callback for the queued MGMT_OP_ADD_EXT_ADV_PARAMS work.
 * On success, reply with the selected TX power and the data space
 * available for this instance.  On failure, remove the instance again
 * (announcing the removal only if it had previously been advertising)
 * and reply with an error status.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already be gone; in that case there is nothing
	 * to report and only the pending command needs freeing.
	 */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8791 
8792 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8793 {
8794 	struct mgmt_pending_cmd *cmd = data;
8795 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8796 
8797 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8798 }
8799 
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: create an advertising
 * instance with the requested parameters but no data yet (data is
 * supplied later via MGMT_OP_ADD_EXT_ADV_DATA).  On controllers with
 * extended advertising the parameters are programmed asynchronously
 * and the reply comes from add_ext_adv_params_complete(); otherwise
 * the reply is sent immediately.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and limited by the number of
	 * advertising sets the controller supports.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, reply now with
		 * the defaults and available data space.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8915 
8916 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8917 {
8918 	struct mgmt_pending_cmd *cmd = data;
8919 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8920 	struct mgmt_rp_add_advertising rp;
8921 
8922 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8923 
8924 	memset(&rp, 0, sizeof(rp));
8925 
8926 	rp.instance = cp->instance;
8927 
8928 	if (err)
8929 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8930 				mgmt_status(err));
8931 	else
8932 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8933 				  mgmt_status(err), &rp, sizeof(rp));
8934 
8935 	mgmt_pending_free(cmd);
8936 }
8937 
8938 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8939 {
8940 	struct mgmt_pending_cmd *cmd = data;
8941 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8942 	int err;
8943 
8944 	if (ext_adv_capable(hdev)) {
8945 		err = hci_update_adv_data_sync(hdev, cp->instance);
8946 		if (err)
8947 			return err;
8948 
8949 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8950 		if (err)
8951 			return err;
8952 
8953 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8954 	}
8955 
8956 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8957 }
8958 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan
 * response data to an instance previously created with
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then schedule it.  On any failure the
 * instance is removed again (clear_new_instance), matching the
 * two-step add_ext_adv_* contract.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by a prior ADD_EXT_ADV_PARAMS. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* A still-pending instance becomes official now, even
		 * though no HCI traffic happens.
		 */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9077 
9078 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9079 					int err)
9080 {
9081 	struct mgmt_pending_cmd *cmd = data;
9082 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9083 	struct mgmt_rp_remove_advertising rp;
9084 
9085 	bt_dev_dbg(hdev, "err %d", err);
9086 
9087 	memset(&rp, 0, sizeof(rp));
9088 	rp.instance = cp->instance;
9089 
9090 	if (err)
9091 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9092 				mgmt_status(err));
9093 	else
9094 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9095 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9096 
9097 	mgmt_pending_free(cmd);
9098 }
9099 
9100 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9101 {
9102 	struct mgmt_pending_cmd *cmd = data;
9103 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9104 	int err;
9105 
9106 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9107 	if (err)
9108 		return err;
9109 
9110 	if (list_empty(&hdev->adv_instances))
9111 		err = hci_disable_advertising_sync(hdev);
9112 
9113 	return err;
9114 }
9115 
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove advertising instance
 * cp->instance (0 removes all instances).  The actual removal happens
 * asynchronously; the reply is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must exist; instance 0 means "all". */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with a pending SET_LE, which also touches advertising. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered at all means there is nothing to remove. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9163 
9164 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9165 			     void *data, u16 data_len)
9166 {
9167 	struct mgmt_cp_get_adv_size_info *cp = data;
9168 	struct mgmt_rp_get_adv_size_info rp;
9169 	u32 flags, supported_flags;
9170 
9171 	bt_dev_dbg(hdev, "sock %p", sk);
9172 
9173 	if (!lmp_le_capable(hdev))
9174 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9175 				       MGMT_STATUS_REJECTED);
9176 
9177 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9178 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9179 				       MGMT_STATUS_INVALID_PARAMS);
9180 
9181 	flags = __le32_to_cpu(cp->flags);
9182 
9183 	/* The current implementation only supports a subset of the specified
9184 	 * flags.
9185 	 */
9186 	supported_flags = get_supported_adv_flags(hdev);
9187 	if (flags & ~supported_flags)
9188 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9189 				       MGMT_STATUS_INVALID_PARAMS);
9190 
9191 	rp.instance = cp->instance;
9192 	rp.flags = cp->flags;
9193 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9194 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9195 
9196 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9197 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9198 }
9199 
/* Dispatch table for incoming mgmt commands.  Entries are indexed by
 * mgmt opcode (entry 0 corresponds to the unused "no command" opcode),
 * so the order here must track the opcode numbering in mgmt.h.  The
 * second field is the expected parameter size — with HCI_MGMT_VAR_LEN
 * it is a minimum rather than an exact length; presumably enforced by
 * the mgmt socket dispatch code before the handler runs — see
 * hci_sock.c to confirm.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
9334 
9335 void mgmt_index_added(struct hci_dev *hdev)
9336 {
9337 	struct mgmt_ev_ext_index ev;
9338 
9339 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9340 		return;
9341 
9342 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9343 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9344 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9345 		ev.type = 0x01;
9346 	} else {
9347 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9348 				 HCI_MGMT_INDEX_EVENTS);
9349 		ev.type = 0x00;
9350 	}
9351 
9352 	ev.bus = hdev->bus;
9353 
9354 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9355 			 HCI_MGMT_EXT_INDEX_EVENTS);
9356 }
9357 
9358 void mgmt_index_removed(struct hci_dev *hdev)
9359 {
9360 	struct mgmt_ev_ext_index ev;
9361 	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9362 
9363 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9364 		return;
9365 
9366 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9367 
9368 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9369 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9370 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9371 		ev.type = 0x01;
9372 	} else {
9373 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9374 				 HCI_MGMT_INDEX_EVENTS);
9375 		ev.type = 0x00;
9376 	}
9377 
9378 	ev.bus = hdev->bus;
9379 
9380 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9381 			 HCI_MGMT_EXT_INDEX_EVENTS);
9382 
9383 	/* Cancel any remaining timed work */
9384 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9385 		return;
9386 	cancel_delayed_work_sync(&hdev->discov_off);
9387 	cancel_delayed_work_sync(&hdev->service_cache);
9388 	cancel_delayed_work_sync(&hdev->rpa_expired);
9389 }
9390 
/* Called when a power-on attempt finishes.  On success, restore LE
 * actions and refresh passive scanning; in all cases complete any
 * pending SET_POWERED commands and emit New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* Reply to every pending SET_POWERED command; settings_rsp also
	 * records one requester socket in match.sk (with a reference).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp, if any. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9413 
/* Called when the controller goes down: complete pending commands with
 * an appropriate status, announce a zero class of device if one was
 * set, and emit New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	/* All-zero class of device, reported once the device is off. */
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	/* Complete every remaining pending command (opcode 0 = all). */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	/* Only announce the class change if a non-zero class was set. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp, if any. */
	if (match.sk)
		sock_put(match.sk);
}
9447 
9448 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9449 {
9450 	struct mgmt_pending_cmd *cmd;
9451 	u8 status;
9452 
9453 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9454 	if (!cmd)
9455 		return;
9456 
9457 	if (err == -ERFKILL)
9458 		status = MGMT_STATUS_RFKILLED;
9459 	else
9460 		status = MGMT_STATUS_FAILED;
9461 
9462 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9463 
9464 	mgmt_pending_remove(cmd);
9465 }
9466 
9467 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9468 		       bool persistent)
9469 {
9470 	struct mgmt_ev_new_link_key ev;
9471 
9472 	memset(&ev, 0, sizeof(ev));
9473 
9474 	ev.store_hint = persistent;
9475 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9476 	ev.key.addr.type = BDADDR_BREDR;
9477 	ev.key.type = key->type;
9478 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9479 	ev.key.pin_len = key->pin_len;
9480 
9481 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9482 }
9483 
9484 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9485 {
9486 	switch (ltk->type) {
9487 	case SMP_LTK:
9488 	case SMP_LTK_RESPONDER:
9489 		if (ltk->authenticated)
9490 			return MGMT_LTK_AUTHENTICATED;
9491 		return MGMT_LTK_UNAUTHENTICATED;
9492 	case SMP_LTK_P256:
9493 		if (ltk->authenticated)
9494 			return MGMT_LTK_P256_AUTH;
9495 		return MGMT_LTK_P256_UNAUTH;
9496 	case SMP_LTK_P256_DEBUG:
9497 		return MGMT_LTK_P256_DEBUG;
9498 	}
9499 
9500 	return MGMT_LTK_UNAUTHENTICATED;
9501 }
9502 
/* Forward a newly distributed LE Long Term Key to user space. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* NOTE(review): SMP_LTK appears to denote the initiator-role
	 * key (vs SMP_LTK_RESPONDER) -- confirm against smp.h.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9545 
9546 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9547 {
9548 	struct mgmt_ev_new_irk ev;
9549 
9550 	memset(&ev, 0, sizeof(ev));
9551 
9552 	ev.store_hint = persistent;
9553 
9554 	bacpy(&ev.rpa, &irk->rpa);
9555 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9556 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9557 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9558 
9559 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9560 }
9561 
/* Forward a newly distributed Connection Signature Resolving Key to
 * user space.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9591 
9592 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9593 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9594 			 u16 max_interval, u16 latency, u16 timeout)
9595 {
9596 	struct mgmt_ev_new_conn_param ev;
9597 
9598 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9599 		return;
9600 
9601 	memset(&ev, 0, sizeof(ev));
9602 	bacpy(&ev.addr.bdaddr, bdaddr);
9603 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9604 	ev.store_hint = store_hint;
9605 	ev.min_interval = cpu_to_le16(min_interval);
9606 	ev.max_interval = cpu_to_le16(max_interval);
9607 	ev.latency = cpu_to_le16(latency);
9608 	ev.timeout = cpu_to_le16(timeout);
9609 
9610 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9611 }
9612 
/* Emit MGMT_EV_DEVICE_CONNECTED for a connection, at most once per
 * connection (guarded by the HCI_CONN_MGMT_CONNECTED flag).
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Already reported to user space -- nothing to do. */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* conn->out is set for locally initiated connections. */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR: append remote name and any non-zero class of
		 * device as EIR fields.
		 */
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9665 
/* mgmt_pending_foreach() callback: report the device as unpaired and
 * complete the pending Unpair Device command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9676 
9677 bool mgmt_powering_down(struct hci_dev *hdev)
9678 {
9679 	struct mgmt_pending_cmd *cmd;
9680 	struct mgmt_mode *cp;
9681 
9682 	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9683 		return true;
9684 
9685 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9686 	if (!cmd)
9687 		return false;
9688 
9689 	cp = cmd->param;
9690 	if (!cp->val)
9691 		return true;
9692 
9693 	return false;
9694 }
9695 
/* Emit MGMT_EV_DEVICE_DISCONNECTED for a link previously announced as
 * connected to user space.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Never announced as connected -- nothing to report. */
	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are exposed over the mgmt interface. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}
9722 
9723 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9724 			    u8 link_type, u8 addr_type, u8 status)
9725 {
9726 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9727 	struct mgmt_cp_disconnect *cp;
9728 	struct mgmt_pending_cmd *cmd;
9729 
9730 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9731 			     hdev);
9732 
9733 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9734 	if (!cmd)
9735 		return;
9736 
9737 	cp = cmd->param;
9738 
9739 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9740 		return;
9741 
9742 	if (cp->addr.type != bdaddr_type)
9743 		return;
9744 
9745 	cmd->cmd_complete(cmd, mgmt_status(status));
9746 	mgmt_pending_remove(cmd);
9747 }
9748 
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* If the connection was already announced as connected to user
	 * space, emit Device Disconnected instead so the state stays
	 * consistent; the failure status is passed through as reason.
	 */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9765 
9766 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9767 {
9768 	struct mgmt_ev_pin_code_request ev;
9769 
9770 	bacpy(&ev.addr.bdaddr, bdaddr);
9771 	ev.addr.type = BDADDR_BREDR;
9772 	ev.secure = secure;
9773 
9774 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9775 }
9776 
9777 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9778 				  u8 status)
9779 {
9780 	struct mgmt_pending_cmd *cmd;
9781 
9782 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9783 	if (!cmd)
9784 		return;
9785 
9786 	cmd->cmd_complete(cmd, mgmt_status(status));
9787 	mgmt_pending_remove(cmd);
9788 }
9789 
9790 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9791 				      u8 status)
9792 {
9793 	struct mgmt_pending_cmd *cmd;
9794 
9795 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9796 	if (!cmd)
9797 		return;
9798 
9799 	cmd->cmd_complete(cmd, mgmt_status(status));
9800 	mgmt_pending_remove(cmd);
9801 }
9802 
9803 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9804 			      u8 link_type, u8 addr_type, u32 value,
9805 			      u8 confirm_hint)
9806 {
9807 	struct mgmt_ev_user_confirm_request ev;
9808 
9809 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9810 
9811 	bacpy(&ev.addr.bdaddr, bdaddr);
9812 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9813 	ev.confirm_hint = confirm_hint;
9814 	ev.value = cpu_to_le32(value);
9815 
9816 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9817 			  NULL);
9818 }
9819 
9820 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9821 			      u8 link_type, u8 addr_type)
9822 {
9823 	struct mgmt_ev_user_passkey_request ev;
9824 
9825 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9826 
9827 	bacpy(&ev.addr.bdaddr, bdaddr);
9828 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9829 
9830 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9831 			  NULL);
9832 }
9833 
9834 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9835 				      u8 link_type, u8 addr_type, u8 status,
9836 				      u8 opcode)
9837 {
9838 	struct mgmt_pending_cmd *cmd;
9839 
9840 	cmd = pending_find(opcode, hdev);
9841 	if (!cmd)
9842 		return -ENOENT;
9843 
9844 	cmd->cmd_complete(cmd, mgmt_status(status));
9845 	mgmt_pending_remove(cmd);
9846 
9847 	return 0;
9848 }
9849 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9856 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9864 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9871 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9879 
9880 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9881 			     u8 link_type, u8 addr_type, u32 passkey,
9882 			     u8 entered)
9883 {
9884 	struct mgmt_ev_passkey_notify ev;
9885 
9886 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9887 
9888 	bacpy(&ev.addr.bdaddr, bdaddr);
9889 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9890 	ev.passkey = __cpu_to_le32(passkey);
9891 	ev.entered = entered;
9892 
9893 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9894 }
9895 
/* Report an authentication failure for a connection and complete any
 * pending pairing command associated with it.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the pairing initiator's socket: it receives the command
	 * response below rather than the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9916 
/* Handle completion of the controller authentication-enable update
 * triggered by Set Link Security.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	/* On failure just fail the pending Set Link Security commands. */
	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt setting
	 * and remember whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	/* Only broadcast New Settings if the setting really changed. */
	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9943 
9944 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9945 {
9946 	struct cmd_lookup *match = data;
9947 
9948 	if (match->sk == NULL) {
9949 		match->sk = cmd->sk;
9950 		sock_hold(match->sk);
9951 	}
9952 }
9953 
/* Handle completion of a class of device update and broadcast the new
 * value to interested mgmt sockets.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Take a reference to a socket from any pending command that can
	 * modify the class of device.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	/* Only broadcast the new class when the update succeeded. */
	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9972 
/* Handle completion of a local name update and broadcast the new name
 * to mgmt listeners.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command initiated this change, so sync the
		 * stored copy with the controller's name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to changing the power
		 * state of the HCI dev don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10003 
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	/* Return true if @uuid occurs in the @uuids list of 128-bit
	 * values (@uuid_count entries).
	 */
	u16 idx;

	for (idx = 0; idx < uuid_count; idx++) {
		if (memcmp(uuid, uuids[idx], 16) == 0)
			return true;
	}

	return false;
}
10015 
/* Return true if the EIR/advertising data in @eir contains at least
 * one of the 128-bit UUIDs in @uuids.  16- and 32-bit service UUIDs
 * are expanded to 128-bit form using the Bluetooth base UUID before
 * comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	/* EIR is a sequence of length-prefixed fields: len, type, data. */
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Each little-endian 16-bit UUID replaces bytes
			 * 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs replace bytes 12-15 of the base. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared directly. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte plus payload). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10070 
/* Apply the Start Service Discovery filter (RSSI threshold and UUID
 * list) to a single discovery result.  Returns false if the result
 * should be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10113 
10114 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10115 				  bdaddr_t *bdaddr, u8 addr_type)
10116 {
10117 	struct mgmt_ev_adv_monitor_device_lost ev;
10118 
10119 	ev.monitor_handle = cpu_to_le16(handle);
10120 	bacpy(&ev.addr.bdaddr, bdaddr);
10121 	ev.addr.type = addr_type;
10122 
10123 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10124 		   NULL);
10125 }
10126 
/* Wrap a DEVICE_FOUND event skb into an ADV_MONITOR_DEVICE_FOUND event
 * carrying the handle of the matched monitor.  @skb is only read; the
 * caller keeps ownership of it.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size of the original event plus the extra monitor_handle. */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10156 
/* Route a DEVICE_FOUND skb to MGMT_EV_DEVICE_FOUND and/or
 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND listeners.  Consumes @skb: it is
 * either forwarded via mgmt_event_skb() or freed.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below while walking the monitored device list. */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify each monitored device only once. */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Any still-unnotified device keeps the pending flag set. */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10220 
/* Emit MGMT_EV_MESH_DEVICE_FOUND for an LE advertisement, but only if
 * it carries at least one of the AD types the mesh user requested (an
 * empty mesh_ad_types list accepts everything).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No filter configured: accept every advertisement. */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* Zero terminates the configured list. */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	/* Also check the scan response data for requested AD types. */
	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10286 
/* Process a discovery result: hand it to mesh if enabled, apply the
 * discovery filters and emit MGMT_EV_DEVICE_FOUND, possibly routed
 * through the advertisement monitor path.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh gets a copy of every LE report regardless of filters. */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the class of device as an EIR field unless one is
	 * already present in the EIR data.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10378 
10379 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10380 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10381 {
10382 	struct sk_buff *skb;
10383 	struct mgmt_ev_device_found *ev;
10384 	u16 eir_len = 0;
10385 	u32 flags = 0;
10386 
10387 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10388 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10389 	if (!skb)
10390 		return;
10391 
10392 	ev = skb_put(skb, sizeof(*ev));
10393 	bacpy(&ev->addr.bdaddr, bdaddr);
10394 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10395 	ev->rssi = rssi;
10396 
10397 	if (name)
10398 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10399 	else
10400 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10401 
10402 	ev->eir_len = cpu_to_le16(eir_len);
10403 	ev->flags = cpu_to_le32(flags);
10404 
10405 	mgmt_event_skb(skb, NULL);
10406 }
10407 
10408 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10409 {
10410 	struct mgmt_ev_discovering ev;
10411 
10412 	bt_dev_dbg(hdev, "discovering %u", discovering);
10413 
10414 	memset(&ev, 0, sizeof(ev));
10415 	ev.type = hdev->discovery.type;
10416 	ev.discovering = discovering;
10417 
10418 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10419 }
10420 
10421 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10422 {
10423 	struct mgmt_ev_controller_suspend ev;
10424 
10425 	ev.suspend_state = state;
10426 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10427 }
10428 
10429 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10430 		   u8 addr_type)
10431 {
10432 	struct mgmt_ev_controller_resume ev;
10433 
10434 	ev.wake_reason = reason;
10435 	if (bdaddr) {
10436 		bacpy(&ev.addr.bdaddr, bdaddr);
10437 		ev.addr.type = addr_type;
10438 	} else {
10439 		memset(&ev.addr, 0, sizeof(ev.addr));
10440 	}
10441 
10442 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10443 }
10444 
/* The management interface is served over the HCI control channel. */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10451 
/* Register the management channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10456 
/* Unregister the management channel on shutdown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10461 
10462 void mgmt_cleanup(struct sock *sk)
10463 {
10464 	struct mgmt_mesh_tx *mesh_tx;
10465 	struct hci_dev *hdev;
10466 
10467 	read_lock(&hci_dev_list_lock);
10468 
10469 	list_for_each_entry(hdev, &hci_dev_list, list) {
10470 		do {
10471 			mesh_tx = mgmt_mesh_next(hdev, sk);
10472 
10473 			if (mesh_tx)
10474 				mesh_send_complete(hdev, mesh_tx, true);
10475 		} while (mesh_tx);
10476 	}
10477 
10478 	read_unlock(&hci_dev_list_lock);
10479 }
10480