xref: /linux/net/bluetooth/mgmt.c (revision 4fd18fc38757217c746aa063ba9e4729814dc737)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 
42 #define MGMT_VERSION	1
43 #define MGMT_REVISION	19
44 
/* Opcodes that trusted (privileged) management sockets are allowed to
 * issue; untrusted sockets are restricted to mgmt_untrusted_commands.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
};
128 
/* Events that may be delivered to trusted management sockets; the
 * subset visible to untrusted sockets is mgmt_untrusted_events.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
171 
/* Read-only opcodes that untrusted (unprivileged) management sockets
 * are permitted to issue.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
184 
/* Events that are delivered to untrusted management sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
201 
202 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
203 
204 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
205 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
206 
207 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code.  Status codes beyond the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
271 
272 static u8 mgmt_status(u8 hci_status)
273 {
274 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
275 		return mgmt_status_table[hci_status];
276 
277 	return MGMT_STATUS_FAILED;
278 }
279 
280 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
281 			    u16 len, int flag)
282 {
283 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
284 			       flag, NULL);
285 }
286 
/* Send an event on the control channel to sockets matching @flag,
 * except for @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
293 
/* Send an event on the control channel to trusted sockets only,
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
300 
301 static u8 le_addr_type(u8 mgmt_addr_type)
302 {
303 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
304 		return ADDR_LE_DEV_PUBLIC;
305 	else
306 		return ADDR_LE_DEV_RANDOM;
307 }
308 
/* Fill a mgmt_rp_read_version reply with the implemented management
 * interface version and revision.  Non-static so other channel code
 * can reuse it.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
316 
/* MGMT_OP_READ_VERSION handler: reply with the interface version.
 * Index-less command, so MGMT_INDEX_NONE is used in the reply.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
329 
330 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
331 			 u16 data_len)
332 {
333 	struct mgmt_rp_read_commands *rp;
334 	u16 num_commands, num_events;
335 	size_t rp_size;
336 	int i, err;
337 
338 	bt_dev_dbg(hdev, "sock %p", sk);
339 
340 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
341 		num_commands = ARRAY_SIZE(mgmt_commands);
342 		num_events = ARRAY_SIZE(mgmt_events);
343 	} else {
344 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
345 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
346 	}
347 
348 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
349 
350 	rp = kmalloc(rp_size, GFP_KERNEL);
351 	if (!rp)
352 		return -ENOMEM;
353 
354 	rp->num_commands = cpu_to_le16(num_commands);
355 	rp->num_events = cpu_to_le16(num_events);
356 
357 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
358 		__le16 *opcode = rp->opcodes;
359 
360 		for (i = 0; i < num_commands; i++, opcode++)
361 			put_unaligned_le16(mgmt_commands[i], opcode);
362 
363 		for (i = 0; i < num_events; i++, opcode++)
364 			put_unaligned_le16(mgmt_events[i], opcode);
365 	} else {
366 		__le16 *opcode = rp->opcodes;
367 
368 		for (i = 0; i < num_commands; i++, opcode++)
369 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
370 
371 		for (i = 0; i < num_events; i++, opcode++)
372 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
373 	}
374 
375 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
376 				rp, rp_size);
377 	kfree(rp);
378 
379 	return err;
380 }
381 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all
 * configured primary controllers.  Index-less command.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of matching controllers,
	 * used only to size the reply allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* Each index is a __le16, hence 2 bytes per entry */
	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally hiding devices
	 * that are still in setup/config, claimed by a user channel, or
	 * raw-only.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length: the second pass may have skipped
	 * entries counted in the first pass.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
441 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * returns only primary controllers that still are HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, hiding devices in setup/config,
	 * user-channel devices and raw-only devices.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute length in case the second pass skipped entries */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
501 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all controllers
 * (configured, unconfigured and AMP) including their bus type.
 * Entry types: 0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP controller.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries, hiding devices in setup/config,
	 * user-channel devices and raw-only devices.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
575 
576 static bool is_configured(struct hci_dev *hdev)
577 {
578 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
579 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
580 		return false;
581 
582 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
583 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
584 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
585 		return false;
586 
587 	return true;
588 }
589 
/* Build the bitmask of configuration options that are still missing
 * before the controller counts as configured.  Mirrors the checks in
 * is_configured() and must stay in sync with it.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not yet performed */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address required but none programmed yet */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
605 
/* Broadcast a New Configuration Options event with the current set of
 * missing options, skipping @skip (the command originator).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
613 
/* Complete a configuration command by replying with the current set of
 * missing options.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
621 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer plus which
 * configuration options the controller supports and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only possible if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
649 
/* Build the MGMT_PHY_* bitmask of PHYs the controller hardware
 * supports, derived from the LMP/LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 2M/3M rates, each optionally with 3/5-slot packets */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
701 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected for use.
 * Note the inverted tests for EDR packet types: a set HCI_2DHx/HCI_3DHx
 * bit in hdev->pkt_type disables that packet type, so a cleared bit
 * means the PHY is selected.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* LE selection tracks the default TX/RX PHY preferences */
	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
764 
765 static u32 get_configurable_phys(struct hci_dev *hdev)
766 {
767 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
768 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
769 }
770 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport support */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (page scan interval) needs >= 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
822 
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, mapped from the corresponding HCI dev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
893 
/* Look up a pending management command for @opcode on the control
 * channel of @hdev.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
898 
/* Like pending_find() but additionally matches on the command's
 * user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
905 
/* Return the LE advertising flag (LE_AD_GENERAL/LE_AD_LIMITED or 0)
 * matching the controller's discoverable mode.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
929 
930 bool mgmt_get_connectable(struct hci_dev *hdev)
931 {
932 	struct mgmt_pending_cmd *cmd;
933 
934 	/* If there's a pending mgmt command the flag will not yet have
935 	 * it's final value, so check for this first.
936 	 */
937 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
938 	if (cmd) {
939 		struct mgmt_mode *cp = cmd->param;
940 
941 		return cp->val;
942 	}
943 
944 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
945 }
946 
/* Delayed work: once the service cache period expires, push the real
 * EIR data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache flag was already cleared */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	/* Build the request under the dev lock, run it after unlock */
	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
967 
/* Delayed work: the resolvable private address timeout fired.  Mark
 * the RPA expired and, if advertising, restart it so a fresh RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Without active advertising the new RPA will be generated
	 * lazily when next needed.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
992 
/* One-time per-controller mgmt initialization, performed the first
 * time a management socket touches the device.  Subsequent calls are
 * no-ops thanks to the test-and-set on HCI_MGMT.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1008 
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * version, class, names and the supported/current settings masks.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot all fields under the dev lock for a consistent view */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1038 
/* Build the EIR blob used by the extended controller info reply/event:
 * class of device (only when BR/EDR is enabled), appearance (only when
 * LE is enabled) and both complete and short local name.
 *
 * Returns the total number of bytes written to @eir. NOTE(review): no
 * bound on @eir is checked here — callers must provide a buffer large
 * enough for the worst case (currently 512-byte buffers).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1062 
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with a variable-length EIR blob appended (class, appearance, names).
 *
 * Also switches the calling socket over to the extended info event
 * model: once this command has been issued, separate class-of-device
 * and local-name events are suppressed for this socket.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed 512-byte scratch buffer holding the reply header plus
	 * the appended EIR data.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1102 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED with a freshly built EIR blob to
 * all sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1118 
/* Complete @opcode on @sk with the current settings bitmask as the
 * (little-endian) response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1126 
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, power off immediately instead of waiting for the delayed
 * power_off timer.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1136 
1137 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1138 {
1139 	struct mgmt_ev_advertising_added ev;
1140 
1141 	ev.instance = instance;
1142 
1143 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1144 }
1145 
1146 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1147 			      u8 instance)
1148 {
1149 	struct mgmt_ev_advertising_removed ev;
1150 
1151 	ev.instance = instance;
1152 
1153 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1154 }
1155 
1156 static void cancel_adv_timeout(struct hci_dev *hdev)
1157 {
1158 	if (hdev->adv_instance_timeout) {
1159 		hdev->adv_instance_timeout = 0;
1160 		cancel_delayed_work(&hdev->adv_instance_expire);
1161 	}
1162 }
1163 
/* Build and run a single HCI request that quiesces the controller in
 * preparation for power off: disable page/inquiry scan, remove
 * advertising instances, disable advertising, stop discovery and abort
 * every active connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed to be
 * sent (handled by the caller, set_powered()).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1197 
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 *
 * Powering on is simply queued to the power_on work. Powering off first
 * quiesces HCI state (clean_up_hci_state()) and arms a delayed power
 * off; if no HCI traffic was needed the power off runs immediately.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply with current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1252 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1260 
/* Public wrapper: broadcast a settings-changed event to all listeners. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1265 
/* Context passed through mgmt_pending_foreach() callbacks: @sk records
 * the first responded socket (held via sock_hold() in settings_rsp()),
 * @mgmt_status carries a status code for callbacks that need one.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1271 
/* mgmt_pending_foreach() callback: complete the pending command with
 * the current settings, remember the first socket seen in the lookup
 * context (taking a reference), and free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1287 
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1295 
/* mgmt_pending_foreach() callback: prefer the command's own completion
 * handler when one is set; otherwise fall back to a plain status reply.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
1309 
/* Generic cmd_complete handler: echo the original request parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1315 
/* cmd_complete handler for address-based commands: reply with just the
 * mgmt_addr_info prefix of the stored request parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1321 
1322 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1323 {
1324 	if (!lmp_bredr_capable(hdev))
1325 		return MGMT_STATUS_NOT_SUPPORTED;
1326 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1327 		return MGMT_STATUS_REJECTED;
1328 	else
1329 		return MGMT_STATUS_SUCCESS;
1330 }
1331 
1332 static u8 mgmt_le_support(struct hci_dev *hdev)
1333 {
1334 	if (!lmp_le_capable(hdev))
1335 		return MGMT_STATUS_NOT_SUPPORTED;
1336 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1337 		return MGMT_STATUS_REJECTED;
1338 	else
1339 		return MGMT_STATUS_SUCCESS;
1340 }
1341 
/* HCI completion hook for the discoverable update: finish the pending
 * MGMT_OP_SET_DISCOVERABLE command, arm the discoverable timeout when
 * one was requested, and broadcast the new settings on success.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag set optimistically
		 * in set_discoverable().
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1376 
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * @cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; @cp->timeout: seconds before discoverable is switched
 * off again (required for limited, forbidden when disabling).
 *
 * Powered-off devices only have their flags updated; otherwise the
 * flags are set here and the actual HCI work is deferred to the
 * discoverable_update work, completed via
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1506 
/* HCI completion hook for the connectable update: finish the pending
 * MGMT_OP_SET_CONNECTABLE command and broadcast the new settings on
 * success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1534 
/* Flag-only path of SET_CONNECTABLE, used while the device is powered
 * off: update HCI_CONNECTABLE (disabling it also clears
 * HCI_DISCOVERABLE), reply to the caller and, if anything changed,
 * refresh scan state and broadcast new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1563 
/* Handler for MGMT_OP_SET_CONNECTABLE: make the device (not) accept
 * incoming connections. When powered off only flags are updated;
 * otherwise the flags are adjusted here and the HCI work is deferred to
 * the connectable_update work, completed via
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Connectable and discoverable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also ends discoverability */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1620 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag. No
 * HCI traffic is required except when limited privacy is in use, where
 * the bondable state can influence the advertising address and a
 * discoverable update is queued.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1663 
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level
 * authentication. Powered-off devices only have the flag updated; a
 * powered device gets HCI_OP_WRITE_AUTH_ENABLE and the command stays
 * pending until the HCI completion.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1732 
/* Handler for MGMT_OP_SET_SSP: toggle Secure Simple Pairing. When
 * powered off only flags change (disabling SSP also drops High Speed,
 * since HS depends on SSP); when powered on, HCI_OP_WRITE_SSP_MODE is
 * sent and the command stays pending until the HCI completion.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1813 
/* Handler for MGMT_OP_SET_HS: toggle High Speed (802.11 AMP) support.
 * Pure flag operation — no HCI traffic. Requires CONFIG_BT_HS, BR/EDR,
 * SSP capability and SSP enabled; disabling HS while powered on is
 * rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An SSP change in flight could invalidate the checks above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1874 
/* HCI completion callback for set_le(): respond to all pending SET_LE
 * commands (with an error status on failure), broadcast the new
 * settings, and when LE ended up enabled refresh the default
 * advertising/scan-response data and the background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Release the reference taken by settings_rsp() */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1921 
/* Handler for MGMT_OP_SET_LE: enable/disable Low Energy support.
 *
 * LE-only controllers cannot have LE switched off (enable is answered
 * positively, disable rejected). When powered off, or when the host LE
 * state already matches, only flags change. Otherwise
 * HCI_OP_WRITE_LE_HOST_SUPPORTED is issued (preceded, on disable, by
 * stopping advertising), completed via le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning host LE support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2028 
2029 /* This is a helper function to test for pending mgmt commands that can
2030  * cause CoD or EIR HCI commands. We can only allow one such pending
2031  * mgmt command at a time since otherwise we cannot easily track what
2032  * the current values are, will be, and based on that calculate if a new
2033  * HCI command needs to be sent and if yes with what value.
2034  */
2035 static bool pending_eir_or_class(struct hci_dev *hdev)
2036 {
2037 	struct mgmt_pending_cmd *cmd;
2038 
2039 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2040 		switch (cmd->opcode) {
2041 		case MGMT_OP_ADD_UUID:
2042 		case MGMT_OP_REMOVE_UUID:
2043 		case MGMT_OP_SET_DEV_CLASS:
2044 		case MGMT_OP_SET_POWERED:
2045 			return true;
2046 		}
2047 	}
2048 
2049 	return false;
2050 }
2051 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16- and 32-bit UUIDs are aliases of this
 * base with only the top four bytes differing — see get_uuid_size().
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2056 
2057 static u8 get_uuid_size(const u8 *uuid)
2058 {
2059 	u32 val;
2060 
2061 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2062 		return 128;
2063 
2064 	val = get_unaligned_le32(&uuid[12]);
2065 	if (val > 0xffff)
2066 		return 32;
2067 
2068 	return 16;
2069 }
2070 
/* Finish a pending class-of-device related command (@mgmt_op) with the
 * translated HCI @status, replying with the current device class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2089 
/* HCI request completion callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2096 
/* Handler for MGMT_OP_ADD_UUID: record a service UUID and refresh the
 * class of device and EIR data on the controller.
 *
 * If the resulting request needs no HCI traffic (-ENODATA) the command
 * completes immediately; otherwise it stays pending until
 * add_uuid_complete() runs.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2154 
2155 static bool enable_service_cache(struct hci_dev *hdev)
2156 {
2157 	if (!hdev_is_powered(hdev))
2158 		return false;
2159 
2160 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2161 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2162 				   CACHE_TIMEOUT);
2163 		return true;
2164 	}
2165 
2166 	return false;
2167 }
2168 
/* HCI request completion callback for remove_uuid(): finish the pending
 * MGMT_OP_REMOVE_UUID command with the request's status.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2175 
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or all of
 * them when the all-zero wildcard UUID is given) and refresh class/EIR.
 *
 * For the wildcard case, if the service cache timer gets armed the
 * actual class/EIR update is left to the cache work and the command is
 * completed immediately. Removing an unknown UUID yields
 * MGMT_STATUS_INVALID_PARAMS.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant since matching entries are deleted while walking */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: empty request, nothing changed - reply now */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2254 
/* HCI request completion callback for set_dev_class(): finish the
 * pending MGMT_OP_SET_DEV_CLASS command with the request's status.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2261 
/* Handler for MGMT_OP_SET_DEV_CLASS: set the major/minor device class.
 *
 * BR/EDR only. The low two bits of minor and the top three bits of
 * major are reserved and must be zero. When the controller is powered
 * off, only the stored values are updated and the command completes
 * immediately; otherwise an HCI request is queued and the reply is
 * deferred to set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits: minor[1:0] and major[7:5] must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock around the synchronous cancel:
		 * NOTE(review): presumably the service_cache work takes
		 * hci_dev_lock itself, so cancelling it synchronously
		 * while holding the lock could deadlock - confirm.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: empty request, class already current */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2332 
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the entire set of stored
 * BR/EDR link keys with the ones supplied by userspace.
 *
 * The full key list is validated (count vs. message length, address
 * type, key type range) before any existing keys are cleared, so a bad
 * request leaves the current keys untouched. Blocked keys and debug
 * combination keys are silently skipped when loading.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key count that still fits in a u16-sized message */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate everything up front - keys are only cleared below once
	 * the whole request is known to be well-formed.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2421 
2422 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2423 			   u8 addr_type, struct sock *skip_sk)
2424 {
2425 	struct mgmt_ev_device_unpaired ev;
2426 
2427 	bacpy(&ev.addr.bdaddr, bdaddr);
2428 	ev.addr.type = addr_type;
2429 
2430 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2431 			  skip_sk);
2432 }
2433 
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete the pairing data (link key
 * for BR/EDR, SMP keys for LE) for one remote device and optionally
 * disconnect it.
 *
 * If no disconnection is needed (not requested, or no live connection)
 * the command completes immediately and DEVICE_UNPAIRED is emitted;
 * otherwise the reply is deferred until the link termination finishes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean parameter */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No live connection: the stored parameters can be
		 * removed right away.
		 */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2561 
/* Handler for MGMT_OP_DISCONNECT: terminate the BR/EDR or LE link to a
 * remote device.
 *
 * Only one DISCONNECT may be pending at a time (MGMT_STATUS_BUSY
 * otherwise). The reply is deferred to the pending command's
 * cmd_complete and sent once the disconnection finishes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2627 
2628 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2629 {
2630 	switch (link_type) {
2631 	case LE_LINK:
2632 		switch (addr_type) {
2633 		case ADDR_LE_DEV_PUBLIC:
2634 			return BDADDR_LE_PUBLIC;
2635 
2636 		default:
2637 			/* Fallback to LE Random address type */
2638 			return BDADDR_LE_RANDOM;
2639 		}
2640 
2641 	default:
2642 		/* Fallback to BR/EDR type */
2643 		return BDADDR_BREDR;
2644 	}
2645 }
2646 
2647 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2648 			   u16 data_len)
2649 {
2650 	struct mgmt_rp_get_connections *rp;
2651 	struct hci_conn *c;
2652 	int err;
2653 	u16 i;
2654 
2655 	bt_dev_dbg(hdev, "sock %p", sk);
2656 
2657 	hci_dev_lock(hdev);
2658 
2659 	if (!hdev_is_powered(hdev)) {
2660 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2661 				      MGMT_STATUS_NOT_POWERED);
2662 		goto unlock;
2663 	}
2664 
2665 	i = 0;
2666 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2667 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2668 			i++;
2669 	}
2670 
2671 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2672 	if (!rp) {
2673 		err = -ENOMEM;
2674 		goto unlock;
2675 	}
2676 
2677 	i = 0;
2678 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2679 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2680 			continue;
2681 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2682 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2683 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2684 			continue;
2685 		i++;
2686 	}
2687 
2688 	rp->conn_count = cpu_to_le16(i);
2689 
2690 	/* Recalculate length in case of filtered SCO connections, etc */
2691 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2692 				struct_size(rp, addr, i));
2693 
2694 	kfree(rp);
2695 
2696 unlock:
2697 	hci_dev_unlock(hdev);
2698 	return err;
2699 }
2700 
/* Queue an HCI PIN Code Negative Reply for @cp->addr and register a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command whose reply is sent from
 * addr_cmd_complete when the HCI command finishes.
 *
 * Returns 0 on success, -ENOMEM if the pending command cannot be
 * allocated, or the hci_send_cmd() error (pending command removed).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2721 
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code
 * to the controller for an ongoing BR/EDR pairing.
 *
 * If the connection requires high security but the PIN is not 16 bytes,
 * a negative reply is sent to the controller instead and the command
 * fails with MGMT_STATUS_INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte (secure) PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject towards the controller; report INVALID_PARAMS to
		 * the caller unless sending the negative reply itself
		 * failed.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2783 
/* Handler for MGMT_OP_SET_IO_CAPABILITY: store the IO capability used
 * for future pairing attempts.
 *
 * Values above SMP_IO_KEYBOARD_DISPLAY are rejected; the command always
 * completes synchronously.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2806 
2807 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2808 {
2809 	struct hci_dev *hdev = conn->hdev;
2810 	struct mgmt_pending_cmd *cmd;
2811 
2812 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2813 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2814 			continue;
2815 
2816 		if (cmd->user_data != conn)
2817 			continue;
2818 
2819 		return cmd;
2820 	}
2821 
2822 	return NULL;
2823 }
2824 
/* cmd_complete callback for MGMT_OP_PAIR_DEVICE: send the final reply
 * and release the command's hold on the connection.
 *
 * Clears the connection's confirmation callbacks first so no further
 * pairing callbacks fire, then drops the reference taken by
 * pair_device() via hci_conn_get().
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2853 
2854 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2855 {
2856 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2857 	struct mgmt_pending_cmd *cmd;
2858 
2859 	cmd = find_pairing(conn);
2860 	if (cmd) {
2861 		cmd->cmd_complete(cmd, status);
2862 		mgmt_pending_remove(cmd);
2863 	}
2864 }
2865 
/* Connection callback used for BR/EDR pairing (connect/security/disconn
 * confirm): complete the pending Pair Device command with the translated
 * HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2881 
/* Connection callback used for LE pairing. Unlike the BR/EDR variant,
 * a zero status is ignored here: for LE, merely connecting does not
 * mean pairing succeeded - success is reported via mgmt_smp_complete()
 * instead. Only failures terminate the pending command.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2900 
/* Handler for MGMT_OP_PAIR_DEVICE: initiate pairing (dedicated bonding)
 * with a remote BR/EDR or LE device.
 *
 * Establishes (or reuses) a connection, installs pairing callbacks on
 * it and defers the reply to pairing_complete(). If the link is already
 * up and secure enough, the command completes right away.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to the closest mgmt status */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Existing callbacks mean someone else is already driving this
	 * connection's pairing.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3031 
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress Pair
 * Device command for the given address.
 *
 * Completes the pending pairing with MGMT_STATUS_CANCELLED, removes any
 * keys created so far and, if the connection was created purely for the
 * pairing, tears down the link as well.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the one the pairing was started for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3088 
/* Common backend for the user confirmation / passkey / PIN reply
 * commands: validate the connection and forward the user's response.
 *
 * LE responses are routed through SMP and complete synchronously;
 * BR/EDR responses are sent as the HCI command @hci_op (with @passkey
 * attached for HCI_OP_USER_PASSKEY_REPLY) and the mgmt reply is
 * deferred to addr_cmd_complete.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: hand the response to SMP and reply immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3159 
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request.
 * Thin wrapper around user_pairing_resp() (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3171 
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation
 * request. The parameter length is validated explicitly before
 * delegating to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3187 
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user
 * confirmation request. Thin wrapper around user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3199 
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: supply the passkey entered by
 * the user. Thin wrapper around user_pairing_resp() forwarding
 * cp->passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3211 
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request.
 * Thin wrapper around user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3223 
3224 static void adv_expire(struct hci_dev *hdev, u32 flags)
3225 {
3226 	struct adv_info *adv_instance;
3227 	struct hci_request req;
3228 	int err;
3229 
3230 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3231 	if (!adv_instance)
3232 		return;
3233 
3234 	/* stop if current instance doesn't need to be changed */
3235 	if (!(adv_instance->flags & flags))
3236 		return;
3237 
3238 	cancel_adv_timeout(hdev);
3239 
3240 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3241 	if (!adv_instance)
3242 		return;
3243 
3244 	hci_req_init(&req, hdev);
3245 	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3246 					      true);
3247 	if (err)
3248 		return;
3249 
3250 	hci_req_run(&req, NULL);
3251 }
3252 
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME.
 *
 * Looks up the pending management command, reports the (translated)
 * HCI status to the originating socket and, on success, expires any
 * advertising instance that includes the local name so the new name
 * gets re-advertised.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* The name lives in the scan response data; re-schedule
		 * advertising so the controller picks it up.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3284 
/* MGMT_OP_SET_LOCAL_NAME handler: set the controller's device name and
 * short name.
 *
 * If nothing changed, or the controller is powered off, the command is
 * completed immediately (with a LOCAL_NAME_CHANGED event in the
 * powered-off case). Otherwise the name is written to the controller
 * via an HCI request and set_name_complete() finishes the command.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored
	 * unconditionally; the full name below needs an HCI write when
	 * the controller is powered.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3354 
3355 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3356 			  u16 len)
3357 {
3358 	struct mgmt_cp_set_appearance *cp = data;
3359 	u16 appearance;
3360 	int err;
3361 
3362 	bt_dev_dbg(hdev, "sock %p", sk);
3363 
3364 	if (!lmp_le_capable(hdev))
3365 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3366 				       MGMT_STATUS_NOT_SUPPORTED);
3367 
3368 	appearance = le16_to_cpu(cp->appearance);
3369 
3370 	hci_dev_lock(hdev);
3371 
3372 	if (hdev->appearance != appearance) {
3373 		hdev->appearance = appearance;
3374 
3375 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3376 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3377 
3378 		ext_info_changed(hdev, sk);
3379 	}
3380 
3381 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3382 				0);
3383 
3384 	hci_dev_unlock(hdev);
3385 
3386 	return err;
3387 }
3388 
3389 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3390 				 void *data, u16 len)
3391 {
3392 	struct mgmt_rp_get_phy_configuration rp;
3393 
3394 	bt_dev_dbg(hdev, "sock %p", sk);
3395 
3396 	hci_dev_lock(hdev);
3397 
3398 	memset(&rp, 0, sizeof(rp));
3399 
3400 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3401 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3402 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3403 
3404 	hci_dev_unlock(hdev);
3405 
3406 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3407 				 &rp, sizeof(rp));
3408 }
3409 
3410 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3411 {
3412 	struct mgmt_ev_phy_configuration_changed ev;
3413 
3414 	memset(&ev, 0, sizeof(ev));
3415 
3416 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3417 
3418 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3419 			  sizeof(ev), skip);
3420 }
3421 
/* HCI request completion callback for MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Reports the HCI status of the LE Set Default PHY command back to the
 * issuing socket and, on success, broadcasts the new PHY configuration
 * to all other management sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Skip the issuing socket; it got the command complete */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3452 
/* MGMT_OP_SET_PHY_CONFIGURATION handler: select which BR/EDR packet
 * types and LE PHYs the controller may use.
 *
 * BR/EDR selections are applied host-side by rewriting hdev->pkt_type;
 * LE selections require an HCI LE Set Default PHY command, completed
 * asynchronously via set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections outside the controller's capabilities */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must always remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map BR/EDR PHY selections onto HCI packet-type bits. Note the
	 * inverted polarity for the EDR (2M/3M) entries below: selecting
	 * an EDR PHY clears its bit, deselecting sets it (the EDR bits
	 * appear to act as "do not use" flags — cf. the HCI packet type
	 * definition).
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR packet types changed, no HCI command is needed
	 * and the command can be completed right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller the host has no preference
	 * for TX (0x01) resp. RX (0x02) PHYs.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3607 
3608 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3609 			    u16 len)
3610 {
3611 	int err = MGMT_STATUS_SUCCESS;
3612 	struct mgmt_cp_set_blocked_keys *keys = data;
3613 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3614 				   sizeof(struct mgmt_blocked_key_info));
3615 	u16 key_count, expected_len;
3616 	int i;
3617 
3618 	bt_dev_dbg(hdev, "sock %p", sk);
3619 
3620 	key_count = __le16_to_cpu(keys->key_count);
3621 	if (key_count > max_key_count) {
3622 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3623 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3624 				       MGMT_STATUS_INVALID_PARAMS);
3625 	}
3626 
3627 	expected_len = struct_size(keys, keys, key_count);
3628 	if (expected_len != len) {
3629 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3630 			   expected_len, len);
3631 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3632 				       MGMT_STATUS_INVALID_PARAMS);
3633 	}
3634 
3635 	hci_dev_lock(hdev);
3636 
3637 	hci_blocked_keys_clear(hdev);
3638 
3639 	for (i = 0; i < keys->key_count; ++i) {
3640 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3641 
3642 		if (!b) {
3643 			err = MGMT_STATUS_NO_RESOURCES;
3644 			break;
3645 		}
3646 
3647 		b->type = keys->keys[i].type;
3648 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3649 		list_add_rcu(&b->list, &hdev->blocked_keys);
3650 	}
3651 	hci_dev_unlock(hdev);
3652 
3653 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3654 				err, NULL, 0);
3655 }
3656 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech
 * (mSBC over SCO) host flag.
 *
 * Only supported when the driver declares the quirk, and the flag may
 * only be flipped while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the value while powered would need controller
	 * reconfiguration, so reject it in that case.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* changed tracks whether the flag actually flipped, so that
	 * NEW_SETTINGS is only emitted on a real change.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3712 
/* MGMT_OP_READ_CONTROLLER_CAP handler: report controller capabilities
 * as EIR-style (length, type, value) tagged entries: security flags,
 * max encryption key sizes and, when available, the LE TX power range.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes is enough for the reply header plus all entries
	 * appended below (3 + 4 + 4 + 4 bytes of EIR data at most).
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3776 
/* Experimental feature UUIDs. The byte arrays are stored in reversed
 * (little-endian) order relative to the UUID string shown above each
 * one, matching how they are sent on the wire.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3796 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental
 * features (UUID + flags pairs) available either globally (no hdev,
 * debug feature) or per controller (simultaneous central/peripheral,
 * LL privacy).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is global, so only listed when the command
	 * was sent to the non-controller (NONE) index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* BIT(0): controller supports acting as central and
		 * peripheral simultaneously (per its LE supported states).
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0): enabled, BIT(1): always set when supported */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3855 
3856 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3857 					  struct sock *skip)
3858 {
3859 	struct mgmt_ev_exp_feature_changed ev;
3860 
3861 	memset(&ev, 0, sizeof(ev));
3862 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3863 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3864 
3865 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3866 				  &ev, sizeof(ev),
3867 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3868 
3869 }
3870 
3871 #ifdef CONFIG_BT_FEATURE_DEBUG
3872 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3873 {
3874 	struct mgmt_ev_exp_feature_changed ev;
3875 
3876 	memset(&ev, 0, sizeof(ev));
3877 	memcpy(ev.uuid, debug_uuid, 16);
3878 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3879 
3880 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3881 				  &ev, sizeof(ev),
3882 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3883 }
3884 #endif
3885 
/* MGMT_OP_SET_EXP_FEATURE handler: enable/disable an experimental
 * feature identified by UUID, or disable all of them when the all-zero
 * UUID is given.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* All-zero UUID: disable every experimental feature that is
	 * applicable to the addressed index.
	 */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		/* LL privacy can only be toggled while powered off */
		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4036 
4037 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4038 
4039 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4040 			    u16 data_len)
4041 {
4042 	struct mgmt_cp_get_device_flags *cp = data;
4043 	struct mgmt_rp_get_device_flags rp;
4044 	struct bdaddr_list_with_flags *br_params;
4045 	struct hci_conn_params *params;
4046 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4047 	u32 current_flags = 0;
4048 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4049 
4050 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4051 		   &cp->addr.bdaddr, cp->addr.type);
4052 
4053 	hci_dev_lock(hdev);
4054 
4055 	if (cp->addr.type == BDADDR_BREDR) {
4056 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4057 							      &cp->addr.bdaddr,
4058 							      cp->addr.type);
4059 		if (!br_params)
4060 			goto done;
4061 
4062 		current_flags = br_params->current_flags;
4063 	} else {
4064 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4065 						le_addr_type(cp->addr.type));
4066 
4067 		if (!params)
4068 			goto done;
4069 
4070 		current_flags = params->current_flags;
4071 	}
4072 
4073 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4074 	rp.addr.type = cp->addr.type;
4075 	rp.supported_flags = cpu_to_le32(supported_flags);
4076 	rp.current_flags = cpu_to_le32(current_flags);
4077 
4078 	status = MGMT_STATUS_SUCCESS;
4079 
4080 done:
4081 	hci_dev_unlock(hdev);
4082 
4083 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4084 				&rp, sizeof(rp));
4085 }
4086 
4087 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4088 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4089 				 u32 supported_flags, u32 current_flags)
4090 {
4091 	struct mgmt_ev_device_flags_changed ev;
4092 
4093 	bacpy(&ev.addr.bdaddr, bdaddr);
4094 	ev.addr.type = bdaddr_type;
4095 	ev.supported_flags = cpu_to_le32(supported_flags);
4096 	ev.current_flags = cpu_to_le32(current_flags);
4097 
4098 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4099 }
4100 
4101 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4102 			    u16 len)
4103 {
4104 	struct mgmt_cp_set_device_flags *cp = data;
4105 	struct bdaddr_list_with_flags *br_params;
4106 	struct hci_conn_params *params;
4107 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4108 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4109 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4110 
4111 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4112 		   &cp->addr.bdaddr, cp->addr.type,
4113 		   __le32_to_cpu(current_flags));
4114 
4115 	if ((supported_flags | current_flags) != supported_flags) {
4116 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4117 			    current_flags, supported_flags);
4118 		goto done;
4119 	}
4120 
4121 	hci_dev_lock(hdev);
4122 
4123 	if (cp->addr.type == BDADDR_BREDR) {
4124 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4125 							      &cp->addr.bdaddr,
4126 							      cp->addr.type);
4127 
4128 		if (br_params) {
4129 			br_params->current_flags = current_flags;
4130 			status = MGMT_STATUS_SUCCESS;
4131 		} else {
4132 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4133 				    &cp->addr.bdaddr, cp->addr.type);
4134 		}
4135 	} else {
4136 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4137 						le_addr_type(cp->addr.type));
4138 		if (params) {
4139 			params->current_flags = current_flags;
4140 			status = MGMT_STATUS_SUCCESS;
4141 		} else {
4142 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4143 				    &cp->addr.bdaddr,
4144 				    le_addr_type(cp->addr.type));
4145 		}
4146 	}
4147 
4148 done:
4149 	hci_dev_unlock(hdev);
4150 
4151 	if (status == MGMT_STATUS_SUCCESS)
4152 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4153 				     supported_flags, current_flags);
4154 
4155 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4156 				 &cp->addr, sizeof(cp->addr));
4157 }
4158 
4159 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4160 				   u16 handle)
4161 {
4162 	struct mgmt_ev_adv_monitor_added ev;
4163 
4164 	ev.monitor_handle = cpu_to_le16(handle);
4165 
4166 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4167 }
4168 
4169 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4170 				     u16 handle)
4171 {
4172 	struct mgmt_ev_adv_monitor_added ev;
4173 
4174 	ev.monitor_handle = cpu_to_le16(handle);
4175 
4176 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
4177 }
4178 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report the supported
 * monitor features, the fixed per-controller limits and the handles of
 * all currently registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): no bound check against the handles[] capacity
	 * here - presumably monitor registration caps the count at
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES; verify in hci_add_adv_monitor.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Once controller-based monitoring is in place, the enabled_features
	 * should reflect the use.
	 */
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = 0;
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4227 
4228 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4229 				    void *data, u16 len)
4230 {
4231 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4232 	struct mgmt_rp_add_adv_patterns_monitor rp;
4233 	struct adv_monitor *m = NULL;
4234 	struct adv_pattern *p = NULL;
4235 	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4236 	__u8 cp_ofst = 0, cp_len = 0;
4237 	int err, i;
4238 
4239 	BT_DBG("request for %s", hdev->name);
4240 
4241 	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4242 		err = mgmt_cmd_status(sk, hdev->id,
4243 				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4244 				      MGMT_STATUS_INVALID_PARAMS);
4245 		goto failed;
4246 	}
4247 
4248 	m = kmalloc(sizeof(*m), GFP_KERNEL);
4249 	if (!m) {
4250 		err = -ENOMEM;
4251 		goto failed;
4252 	}
4253 
4254 	INIT_LIST_HEAD(&m->patterns);
4255 	m->active = false;
4256 
4257 	for (i = 0; i < cp->pattern_count; i++) {
4258 		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4259 			err = mgmt_cmd_status(sk, hdev->id,
4260 					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4261 					      MGMT_STATUS_INVALID_PARAMS);
4262 			goto failed;
4263 		}
4264 
4265 		cp_ofst = cp->patterns[i].offset;
4266 		cp_len = cp->patterns[i].length;
4267 		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4268 		    cp_len > HCI_MAX_AD_LENGTH ||
4269 		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4270 			err = mgmt_cmd_status(sk, hdev->id,
4271 					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4272 					      MGMT_STATUS_INVALID_PARAMS);
4273 			goto failed;
4274 		}
4275 
4276 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4277 		if (!p) {
4278 			err = -ENOMEM;
4279 			goto failed;
4280 		}
4281 
4282 		p->ad_type = cp->patterns[i].ad_type;
4283 		p->offset = cp->patterns[i].offset;
4284 		p->length = cp->patterns[i].length;
4285 		memcpy(p->value, cp->patterns[i].value, p->length);
4286 
4287 		INIT_LIST_HEAD(&p->list);
4288 		list_add(&p->list, &m->patterns);
4289 	}
4290 
4291 	if (mp_cnt != cp->pattern_count) {
4292 		err = mgmt_cmd_status(sk, hdev->id,
4293 				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4294 				      MGMT_STATUS_INVALID_PARAMS);
4295 		goto failed;
4296 	}
4297 
4298 	hci_dev_lock(hdev);
4299 
4300 	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4301 
4302 	err = hci_add_adv_monitor(hdev, m);
4303 	if (err) {
4304 		if (err == -ENOSPC) {
4305 			mgmt_cmd_status(sk, hdev->id,
4306 					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4307 					MGMT_STATUS_NO_RESOURCES);
4308 		}
4309 		goto unlock;
4310 	}
4311 
4312 	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4313 		mgmt_adv_monitor_added(sk, hdev, m->handle);
4314 
4315 	hci_dev_unlock(hdev);
4316 
4317 	rp.monitor_handle = cpu_to_le16(m->handle);
4318 
4319 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4320 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4321 
4322 unlock:
4323 	hci_dev_unlock(hdev);
4324 
4325 failed:
4326 	hci_free_adv_monitor(m);
4327 	return err;
4328 }
4329 
4330 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4331 			      void *data, u16 len)
4332 {
4333 	struct mgmt_cp_remove_adv_monitor *cp = data;
4334 	struct mgmt_rp_remove_adv_monitor rp;
4335 	unsigned int prev_adv_monitors_cnt;
4336 	u16 handle;
4337 	int err;
4338 
4339 	BT_DBG("request for %s", hdev->name);
4340 
4341 	hci_dev_lock(hdev);
4342 
4343 	handle = __le16_to_cpu(cp->monitor_handle);
4344 	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4345 
4346 	err = hci_remove_adv_monitor(hdev, handle);
4347 	if (err == -ENOENT) {
4348 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4349 				      MGMT_STATUS_INVALID_INDEX);
4350 		goto unlock;
4351 	}
4352 
4353 	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4354 		mgmt_adv_monitor_removed(sk, hdev, handle);
4355 
4356 	hci_dev_unlock(hdev);
4357 
4358 	rp.monitor_handle = cp->monitor_handle;
4359 
4360 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4361 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4362 
4363 unlock:
4364 	hci_dev_unlock(hdev);
4365 	return err;
4366 }
4367 
/* Request-complete callback for the HCI Read Local OOB (Extended) Data
 * commands issued by read_local_oob_data().  Translates the controller's
 * reply into a mgmt response for the pending MGMT_OP_READ_LOCAL_OOB_DATA
 * command and removes that pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Nothing to do if the mgmt command is no longer pending. */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: carries only the P-192 hash/randomizer. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the mgmt reply. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: carries both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4426 
4427 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4428 			       void *data, u16 data_len)
4429 {
4430 	struct mgmt_pending_cmd *cmd;
4431 	struct hci_request req;
4432 	int err;
4433 
4434 	bt_dev_dbg(hdev, "sock %p", sk);
4435 
4436 	hci_dev_lock(hdev);
4437 
4438 	if (!hdev_is_powered(hdev)) {
4439 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4440 				      MGMT_STATUS_NOT_POWERED);
4441 		goto unlock;
4442 	}
4443 
4444 	if (!lmp_ssp_capable(hdev)) {
4445 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4446 				      MGMT_STATUS_NOT_SUPPORTED);
4447 		goto unlock;
4448 	}
4449 
4450 	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4451 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4452 				      MGMT_STATUS_BUSY);
4453 		goto unlock;
4454 	}
4455 
4456 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4457 	if (!cmd) {
4458 		err = -ENOMEM;
4459 		goto unlock;
4460 	}
4461 
4462 	hci_req_init(&req, hdev);
4463 
4464 	if (bredr_sc_enabled(hdev))
4465 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4466 	else
4467 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4468 
4469 	err = hci_req_run_skb(&req, read_local_oob_data_complete);
4470 	if (err < 0)
4471 		mgmt_pending_remove(cmd);
4472 
4473 unlock:
4474 	hci_dev_unlock(hdev);
4475 	return err;
4476 }
4477 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.
 *
 * Stores out-of-band pairing data received from a remote device.  Two
 * wire formats are accepted, distinguished purely by command length:
 * the legacy format with only P-192 values (BR/EDR addresses only) and
 * the extended format carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy format: P-192 hash/randomizer only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended format: both P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known fixed size: reject the command outright. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4585 
4586 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4587 				  void *data, u16 len)
4588 {
4589 	struct mgmt_cp_remove_remote_oob_data *cp = data;
4590 	u8 status;
4591 	int err;
4592 
4593 	bt_dev_dbg(hdev, "sock %p", sk);
4594 
4595 	if (cp->addr.type != BDADDR_BREDR)
4596 		return mgmt_cmd_complete(sk, hdev->id,
4597 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4598 					 MGMT_STATUS_INVALID_PARAMS,
4599 					 &cp->addr, sizeof(cp->addr));
4600 
4601 	hci_dev_lock(hdev);
4602 
4603 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4604 		hci_remote_oob_data_clear(hdev);
4605 		status = MGMT_STATUS_SUCCESS;
4606 		goto done;
4607 	}
4608 
4609 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4610 	if (err < 0)
4611 		status = MGMT_STATUS_INVALID_PARAMS;
4612 	else
4613 		status = MGMT_STATUS_SUCCESS;
4614 
4615 done:
4616 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4617 				status, &cp->addr, sizeof(cp->addr));
4618 
4619 	hci_dev_unlock(hdev);
4620 	return err;
4621 }
4622 
4623 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4624 {
4625 	struct mgmt_pending_cmd *cmd;
4626 
4627 	bt_dev_dbg(hdev, "status %d", status);
4628 
4629 	hci_dev_lock(hdev);
4630 
4631 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4632 	if (!cmd)
4633 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4634 
4635 	if (!cmd)
4636 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4637 
4638 	if (cmd) {
4639 		cmd->cmd_complete(cmd, mgmt_status(status));
4640 		mgmt_pending_remove(cmd);
4641 	}
4642 
4643 	hci_dev_unlock(hdev);
4644 
4645 	/* Handle suspend notifier */
4646 	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4647 			       hdev->suspend_tasks)) {
4648 		bt_dev_dbg(hdev, "Unpaused discovery");
4649 		wake_up(&hdev->suspend_wait_q);
4650 	}
4651 }
4652 
4653 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4654 				    uint8_t *mgmt_status)
4655 {
4656 	switch (type) {
4657 	case DISCOV_TYPE_LE:
4658 		*mgmt_status = mgmt_le_support(hdev);
4659 		if (*mgmt_status)
4660 			return false;
4661 		break;
4662 	case DISCOV_TYPE_INTERLEAVED:
4663 		*mgmt_status = mgmt_le_support(hdev);
4664 		if (*mgmt_status)
4665 			return false;
4666 		fallthrough;
4667 	case DISCOV_TYPE_BREDR:
4668 		*mgmt_status = mgmt_bredr_support(hdev);
4669 		if (*mgmt_status)
4670 			return false;
4671 		break;
4672 	default:
4673 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4674 		return false;
4675 	}
4676 
4677 	return true;
4678 }
4679 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (op selects which).  Validates the
 * request, records the discovery parameters on hdev and defers the
 * actual HCI work to the request workqueue.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while the
	 * controller is doing periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The HCI traffic is generated asynchronously by the discovery
	 * update work; the mgmt reply comes from its completion hook.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4747 
/* MGMT_OP_START_DISCOVERY handler: regular (non-limited) discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4754 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery variant. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4762 
/* Completion handler for Start Service Discovery.  The response carries
 * a single byte from the stored command parameters (the same byte the
 * error paths reply with via &cp->type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4769 
/* MGMT_OP_START_SERVICE_DISCOVERY handler.
 *
 * Starts a discovery session with result filtering: results are
 * matched against an RSSI threshold and an optional list of 128-bit
 * UUIDs supplied in the (variable length) command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that the total command length
	 * still fits in a u16 (each UUID is 16 bytes on the wire).
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while the
	 * controller is doing periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match the declared UUID count exactly. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy; the command buffer is transient. */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Defer the actual HCI work to the request workqueue. */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4870 
/* Completion hook invoked once the HCI layer has finished stopping
 * discovery.  Answers the pending Stop Discovery command, if any, and
 * wakes the suspend machinery if it was waiting for discovery to pause.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4893 
4894 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4895 			  u16 len)
4896 {
4897 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4898 	struct mgmt_pending_cmd *cmd;
4899 	int err;
4900 
4901 	bt_dev_dbg(hdev, "sock %p", sk);
4902 
4903 	hci_dev_lock(hdev);
4904 
4905 	if (!hci_discovery_active(hdev)) {
4906 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4907 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4908 					sizeof(mgmt_cp->type));
4909 		goto unlock;
4910 	}
4911 
4912 	if (hdev->discovery.type != mgmt_cp->type) {
4913 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4914 					MGMT_STATUS_INVALID_PARAMS,
4915 					&mgmt_cp->type, sizeof(mgmt_cp->type));
4916 		goto unlock;
4917 	}
4918 
4919 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4920 	if (!cmd) {
4921 		err = -ENOMEM;
4922 		goto unlock;
4923 	}
4924 
4925 	cmd->cmd_complete = generic_cmd_complete;
4926 
4927 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4928 	queue_work(hdev->req_workqueue, &hdev->discov_update);
4929 	err = 0;
4930 
4931 unlock:
4932 	hci_dev_unlock(hdev);
4933 	return err;
4934 }
4935 
/* MGMT_OP_CONFIRM_NAME handler.
 *
 * Userspace tells us whether it already knows the name of a discovered
 * device; entries whose name is still unknown are queued for remote
 * name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery is active. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The address must match a cache entry with an unknown name. */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Userspace already has the name: no resolution needed. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for remote name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4977 
4978 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4979 			u16 len)
4980 {
4981 	struct mgmt_cp_block_device *cp = data;
4982 	u8 status;
4983 	int err;
4984 
4985 	bt_dev_dbg(hdev, "sock %p", sk);
4986 
4987 	if (!bdaddr_type_is_valid(cp->addr.type))
4988 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4989 					 MGMT_STATUS_INVALID_PARAMS,
4990 					 &cp->addr, sizeof(cp->addr));
4991 
4992 	hci_dev_lock(hdev);
4993 
4994 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4995 				  cp->addr.type);
4996 	if (err < 0) {
4997 		status = MGMT_STATUS_FAILED;
4998 		goto done;
4999 	}
5000 
5001 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5002 		   sk);
5003 	status = MGMT_STATUS_SUCCESS;
5004 
5005 done:
5006 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5007 				&cp->addr, sizeof(cp->addr));
5008 
5009 	hci_dev_unlock(hdev);
5010 
5011 	return err;
5012 }
5013 
5014 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5015 			  u16 len)
5016 {
5017 	struct mgmt_cp_unblock_device *cp = data;
5018 	u8 status;
5019 	int err;
5020 
5021 	bt_dev_dbg(hdev, "sock %p", sk);
5022 
5023 	if (!bdaddr_type_is_valid(cp->addr.type))
5024 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5025 					 MGMT_STATUS_INVALID_PARAMS,
5026 					 &cp->addr, sizeof(cp->addr));
5027 
5028 	hci_dev_lock(hdev);
5029 
5030 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5031 				  cp->addr.type);
5032 	if (err < 0) {
5033 		status = MGMT_STATUS_INVALID_PARAMS;
5034 		goto done;
5035 	}
5036 
5037 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5038 		   sk);
5039 	status = MGMT_STATUS_SUCCESS;
5040 
5041 done:
5042 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5043 				&cp->addr, sizeof(cp->addr));
5044 
5045 	hci_dev_unlock(hdev);
5046 
5047 	return err;
5048 }
5049 
5050 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5051 			 u16 len)
5052 {
5053 	struct mgmt_cp_set_device_id *cp = data;
5054 	struct hci_request req;
5055 	int err;
5056 	__u16 source;
5057 
5058 	bt_dev_dbg(hdev, "sock %p", sk);
5059 
5060 	source = __le16_to_cpu(cp->source);
5061 
5062 	if (source > 0x0002)
5063 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5064 				       MGMT_STATUS_INVALID_PARAMS);
5065 
5066 	hci_dev_lock(hdev);
5067 
5068 	hdev->devid_source = source;
5069 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5070 	hdev->devid_product = __le16_to_cpu(cp->product);
5071 	hdev->devid_version = __le16_to_cpu(cp->version);
5072 
5073 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5074 				NULL, 0);
5075 
5076 	hci_req_init(&req, hdev);
5077 	__hci_req_update_eir(&req);
5078 	hci_req_run(&req, NULL);
5079 
5080 	hci_dev_unlock(hdev);
5081 
5082 	return err;
5083 }
5084 
/* Request-complete callback used when re-enabling an advertising
 * instance; only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5090 
/* Request-complete callback for Set Advertising.  Syncs the
 * HCI_ADVERTISING flag with the controller state, answers all pending
 * MGMT_OP_SET_ADVERTISING commands, wakes the suspend machinery if
 * needed, and re-enables instance advertising when the global setting
 * was just turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state in the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5164 
/* MGMT_OP_SET_ADVERTISING handler.
 *
 * cp->val selects the mode: 0x00 disables advertising, 0x01 enables it,
 * and 0x02 enables it in connectable mode.  When no HCI traffic is
 * required (powered off, no effective change, or a state where the
 * flags alone matter) the flags are toggled and a response sent
 * directly; otherwise an HCI request is queued and the reply comes from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No changes while advertising is paused (e.g. for suspend). */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast new settings if the flags changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5283 
5284 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5285 			      void *data, u16 len)
5286 {
5287 	struct mgmt_cp_set_static_address *cp = data;
5288 	int err;
5289 
5290 	bt_dev_dbg(hdev, "sock %p", sk);
5291 
5292 	if (!lmp_le_capable(hdev))
5293 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5294 				       MGMT_STATUS_NOT_SUPPORTED);
5295 
5296 	if (hdev_is_powered(hdev))
5297 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5298 				       MGMT_STATUS_REJECTED);
5299 
5300 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5301 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5302 			return mgmt_cmd_status(sk, hdev->id,
5303 					       MGMT_OP_SET_STATIC_ADDRESS,
5304 					       MGMT_STATUS_INVALID_PARAMS);
5305 
5306 		/* Two most significant bits shall be set */
5307 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5308 			return mgmt_cmd_status(sk, hdev->id,
5309 					       MGMT_OP_SET_STATIC_ADDRESS,
5310 					       MGMT_STATUS_INVALID_PARAMS);
5311 	}
5312 
5313 	hci_dev_lock(hdev);
5314 
5315 	bacpy(&hdev->static_addr, &cp->bdaddr);
5316 
5317 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5318 	if (err < 0)
5319 		goto unlock;
5320 
5321 	err = new_settings(hdev, sk);
5322 
5323 unlock:
5324 	hci_dev_unlock(hdev);
5325 	return err;
5326 }
5327 
/* MGMT handler for Set Scan Parameters: validates and stores the LE
 * scan interval and window used for passive background scanning, and
 * restarts an already running background scan so the new values take
 * effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* 0x0004 - 0x4000 is the range allowed for LE scan interval
	 * and window (in units of 0.625 ms).
	 */
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must never exceed the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded. Only done when no discovery is in progress, since a
	 * discovery controls scanning itself.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5384 
/* HCI request completion callback for Set Fast Connectable: on success
 * updates the HCI_FAST_CONNECTABLE flag from the value stored in the
 * pending command and notifies userspace; on failure reports the
 * translated HCI status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already have been removed (e.g. by a
	 * power off); in that case there is nobody left to respond to.
	 */
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* cmd->param holds the mgmt_mode copied in
		 * set_fast_connectable().
		 */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5418 
/* MGMT handler for Set Fast Connectable: toggles the BR/EDR page scan
 * parameters between fast and normal. When powered, the change is done
 * asynchronously via an HCI request and completed in
 * fast_connectable_complete(); when powered off only the setting flag
 * is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Fast connectable is a BR/EDR feature and requires at least
	 * Bluetooth 1.2 (for interlaced page scan).
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable operation may be in flight */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* When powered off, only the flag is toggled; the controller is
	 * configured accordingly on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5483 
/* HCI request completion callback for Set BR/EDR: on failure rolls back
 * the HCI_BREDR_ENABLED flag that set_bredr() optimistically set before
 * running the request; on success confirms the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5515 
/* MGMT handler for Set BR/EDR: enables or disables BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered is
 * rejected; enabling while powered triggers an HCI request completed in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Toggling BR/EDR only makes sense on dual-mode controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; a controller with both transports off
	 * would be unusable.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * make sense with BR/EDR present.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 * set_bredr_complete() restores it if the request fails.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5627 
/* HCI request completion callback for Set Secure Connections: maps the
 * requested mode (0x00 off, 0x01 enabled, 0x02 SC-only) onto the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and responds to the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	/* On failure report the error but leave the flags untouched */
	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* cp->val was validated to be 0x00, 0x01 or 0x02 in
	 * set_secure_conn().
	 */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5672 
/* MGMT handler for Set Secure Connections: val may be 0x00 (off),
 * 0x01 (enabled) or 0x02 (SC only mode). If the controller side does
 * not need updating (powered off, no SC support, or BR/EDR disabled)
 * only the host flags are toggled; otherwise Write Secure Connections
 * Host Support is sent and completed in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC is usable if the controller supports it (BR/EDR) or if LE
	 * is enabled (LE SC is a host feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, SC builds on SSP, so SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No controller interaction needed: just toggle the host flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		/* Only broadcast new settings when something changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just confirm the settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5760 
/* MGMT handler for Set Debug Keys: val 0x00 discards debug keys, 0x01
 * keeps them, 0x02 additionally makes the controller generate debug
 * keys (SSP debug mode). The SSP debug mode command is only sent when
 * its effective state actually changes on a powered, SSP-enabled
 * controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean "keep debug keys" */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 means "actively use debug keys" */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5807 
/* MGMT handler for Set Privacy: privacy may be 0x00 (off), 0x01 (on)
 * or 0x02 (limited privacy). Enabling stores the supplied IRK and marks
 * the RPA as expired so a fresh one gets generated. Only accepted while
 * powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the IRK/privacy mode while powered is not allowed */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a new RPA based on the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5864 
5865 static bool irk_is_valid(struct mgmt_irk_info *irk)
5866 {
5867 	switch (irk->addr.type) {
5868 	case BDADDR_LE_PUBLIC:
5869 		return true;
5870 
5871 	case BDADDR_LE_RANDOM:
5872 		/* Two most significant bits shall be set */
5873 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5874 			return false;
5875 		return true;
5876 	}
5877 
5878 	return false;
5879 }
5880 
/* MGMT handler for Load IRKs: replaces the complete set of Identity
 * Resolving Keys known to the kernel with the list supplied by user
 * space. Blocked keys are skipped with a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries that can fit in a 64k mgmt packet */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the existing IRK list so
	 * a bad entry does not leave the list half-replaced.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5951 
5952 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5953 {
5954 	if (key->master != 0x00 && key->master != 0x01)
5955 		return false;
5956 
5957 	switch (key->addr.type) {
5958 	case BDADDR_LE_PUBLIC:
5959 		return true;
5960 
5961 	case BDADDR_LE_RANDOM:
5962 		/* Two most significant bits shall be set */
5963 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5964 			return false;
5965 		return true;
5966 	}
5967 
5968 	return false;
5969 }
5970 
/* MGMT handler for Load Long Term Keys: replaces the complete set of
 * LE Long Term Keys known to the kernel with the list supplied by user
 * space. Blocked keys, debug keys and entries of unknown type are
 * skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on entries that can fit in a 64k mgmt packet */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before clearing the existing key list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* The fallthrough into default deliberately skips
			 * debug keys: they are never added to the list.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6066 
/* Completion helper for Get Connection Information: builds the reply
 * from the hci_conn values cached in cmd->user_data (or invalid marker
 * values on failure) and releases the connection references taken in
 * get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param starts with the mgmt_addr_info of the request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hold and reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6093 
/* HCI request completion callback for the RSSI/TX power refresh issued
 * by get_conn_info(): resolves the connection from the last sent
 * command's handle and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matched: nothing sensible can be completed */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is keyed on the connection it refers to */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6147 
/* MGMT handler for Get Connection Information: returns RSSI and TX
 * power for an active connection. Cached values are returned if they
 * are recent enough; otherwise the controller is queried and the reply
 * is deferred to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address in every reply, even error ones */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info query per connection at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always sent first; see the ordering
		 * assumptions in conn_info_refresh_complete().
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the deferred reply is
		 * sent; released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6268 
/* Completion helper for Get Clock Information: fills in the local and
 * (when a connection was involved) piconet clock values and releases
 * the connection references taken in get_clock_info().
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param starts with the mgmt_addr_info of the request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On error the clock fields stay zeroed */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is NULL when only the local clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold and reference taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6304 
/* HCI request completion callback for Get Clock Information: resolves
 * the connection (if any) from the last sent Read Clock command and
 * completes the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which == 0x01 means the piconet clock of a connection was
	 * read; 0x00 means only the local clock.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6336 
/* MGMT handler for Get Clock Information: reads the local clock and,
 * when a peer address is given, the piconet clock of that BR/EDR
 * connection. The reply is deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address in every reply, even error ones */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00, handle = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the deferred reply is
		 * sent; released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6412 
6413 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6414 {
6415 	struct hci_conn *conn;
6416 
6417 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6418 	if (!conn)
6419 		return false;
6420 
6421 	if (conn->dst_type != type)
6422 		return false;
6423 
6424 	if (conn->state != BT_CONNECTED)
6425 		return false;
6426 
6427 	return true;
6428 }
6429 
/* Set the auto-connect policy for an LE device and re-file its
 * connection-parameters entry on the matching action list.
 *
 * Returns 0 on success or -EIO if no parameters entry could be
 * allocated.
 *
 * This function requires the caller holds hdev->lock
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Look up the existing entry for this address, or create a new
	 * one with defaults.
	 */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the requested policy is already in effect. */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns or
	 * pend_le_reports) the entry currently sits on before re-filing
	 * it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* A pending explicit connect takes precedence over passive
		 * advertising reports.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt when there is no
		 * established LE link to this address yet.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6474 
6475 static void device_added(struct sock *sk, struct hci_dev *hdev,
6476 			 bdaddr_t *bdaddr, u8 type, u8 action)
6477 {
6478 	struct mgmt_ev_device_added ev;
6479 
6480 	bacpy(&ev.addr.bdaddr, bdaddr);
6481 	ev.addr.type = type;
6482 	ev.action = action;
6483 
6484 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6485 }
6486 
/* Handle the Add Device management command.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported and the device is put on the whitelist. For LE addresses
 * the action selects the auto-connect policy for the device's
 * connection parameters entry.
 *
 * Responds with the command address on both success and failure.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject unknown address types and the wildcard BDADDR_ANY. */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined. */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Re-evaluate page scan now that the whitelist changed. */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy. */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the current device flags for the
		 * Device Flags Changed event below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6584 
6585 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6586 			   bdaddr_t *bdaddr, u8 type)
6587 {
6588 	struct mgmt_ev_device_removed ev;
6589 
6590 	bacpy(&ev.addr.bdaddr, bdaddr);
6591 	ev.addr.type = type;
6592 
6593 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6594 }
6595 
/* Handle the Remove Device management command.
 *
 * With a specific address the matching whitelist entry (BR/EDR) or
 * connection parameters entry (LE) is removed. With BDADDR_ANY and
 * address type 0 all whitelist entries and all removable LE connection
 * parameters are cleared.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device. */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate page scan now that the whitelist
			 * changed.
			 */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries that were never added via Add Device (disabled
		 * or explicit-connect only) cannot be removed here.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Remove all devices. */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0. */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the whole BR/EDR whitelist. */
		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		/* Drop all LE connection parameters except disabled
		 * entries; keep entries with a pending explicit connect
		 * but downgrade them to explicit-only.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6724 
/* Handle the Load Connection Parameters management command.
 *
 * Replaces all non-disabled stored LE connection parameters with the
 * list supplied by userspace. Individual entries with an invalid
 * address type or out-of-range parameters are logged and skipped
 * rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count so that expected_len below cannot
	 * overflow u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared element count
	 * exactly.
	 */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Start from a clean slate of non-disabled entries. */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6809 
/* Handle the Set External Configuration management command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers with the
 * external-configuration quirk and, when the configured state flips,
 * moves the controller between the configured and unconfigured index
 * lists. Only allowed while the controller is powered off.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped. */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state changed, re-register the controller
	 * under the other index list.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6865 
/* Handle the Set Public Address management command.
 *
 * Stores the address for the driver's set_bdaddr callback to program
 * on power-up. Only allowed while powered off, for non-zero addresses
 * and controllers that provide set_bdaddr. If setting the address
 * completes the configuration of an unconfigured controller, it is
 * re-registered as configured and powered on for auto-off setup.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Remember whether the stored address actually changed. */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6917 
/* HCI request completion handler for Read Local OOB (Extended) Data.
 *
 * Builds the EIR payload (class of device plus the available P-192
 * and/or P-256 hash and randomizer values), completes the pending
 * Read Local OOB Extended Data command and, on success, broadcasts a
 * Local OOB Data Updated event to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: reply with an empty EIR payload. */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy variant: only P-192 hash/randomizer available. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class (5) + hash192 (18) + rand192 (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended variant: P-256 always, P-192 unless SC-only. */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* class (5) + hash256 (18) + rand256 (18) */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				/* class + both key pairs */
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Broadcast the new OOB data to everyone except the requester. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7028 
7029 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7030 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7031 {
7032 	struct mgmt_pending_cmd *cmd;
7033 	struct hci_request req;
7034 	int err;
7035 
7036 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7037 			       cp, sizeof(*cp));
7038 	if (!cmd)
7039 		return -ENOMEM;
7040 
7041 	hci_req_init(&req, hdev);
7042 
7043 	if (bredr_sc_enabled(hdev))
7044 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7045 	else
7046 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7047 
7048 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7049 	if (err < 0) {
7050 		mgmt_pending_remove(cmd);
7051 		return err;
7052 	}
7053 
7054 	return 0;
7055 }
7056 
/* Handle the Read Local OOB Extended Data management command.
 *
 * For the BR/EDR type with SSP enabled the data has to be read from
 * the controller, so the reply is deferred via read_local_ssp_oob_req
 * and read_local_oob_ext_data_complete. For the LE types (and for
 * BR/EDR without SSP) the EIR payload is built here and answered
 * synchronously, followed by a Local OOB Data Updated broadcast.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine the status and worst-case EIR size so
	 * the reply buffer can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5; /* class of device field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* bdaddr (9) + role (3) + confirm (18) +
				 * random (18) + flags (3)
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the EIR data under hdev->lock. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Data has to come from the controller; the reply
			 * is sent from the request completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the static random address (marked with 0x01) when
		 * forced, when no public address exists, or when only LE
		 * is enabled and a static address is set; otherwise use
		 * the public address (marked with 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Advertise the LE role: peripheral-preferred when
		 * advertising, central-preferred otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to everyone except the requester. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7212 
7213 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7214 {
7215 	u32 flags = 0;
7216 
7217 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7218 	flags |= MGMT_ADV_FLAG_DISCOV;
7219 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7220 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7221 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7222 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7223 	flags |= MGMT_ADV_PARAM_DURATION;
7224 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7225 	flags |= MGMT_ADV_PARAM_INTERVALS;
7226 	flags |= MGMT_ADV_PARAM_TX_POWER;
7227 
7228 	/* In extended adv TX_POWER returned from Set Adv Param
7229 	 * will be always valid.
7230 	 */
7231 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7232 	    ext_adv_capable(hdev))
7233 		flags |= MGMT_ADV_FLAG_TX_POWER;
7234 
7235 	if (ext_adv_capable(hdev)) {
7236 		flags |= MGMT_ADV_FLAG_SEC_1M;
7237 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7238 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7239 
7240 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7241 			flags |= MGMT_ADV_FLAG_SEC_2M;
7242 
7243 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7244 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7245 	}
7246 
7247 	return flags;
7248 }
7249 
7250 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7251 			     void *data, u16 data_len)
7252 {
7253 	struct mgmt_rp_read_adv_features *rp;
7254 	size_t rp_len;
7255 	int err;
7256 	struct adv_info *adv_instance;
7257 	u32 supported_flags;
7258 	u8 *instance;
7259 
7260 	bt_dev_dbg(hdev, "sock %p", sk);
7261 
7262 	if (!lmp_le_capable(hdev))
7263 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7264 				       MGMT_STATUS_REJECTED);
7265 
7266 	/* Enabling the experimental LL Privay support disables support for
7267 	 * advertising.
7268 	 */
7269 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7270 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7271 				       MGMT_STATUS_NOT_SUPPORTED);
7272 
7273 	hci_dev_lock(hdev);
7274 
7275 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7276 	rp = kmalloc(rp_len, GFP_ATOMIC);
7277 	if (!rp) {
7278 		hci_dev_unlock(hdev);
7279 		return -ENOMEM;
7280 	}
7281 
7282 	supported_flags = get_supported_adv_flags(hdev);
7283 
7284 	rp->supported_flags = cpu_to_le32(supported_flags);
7285 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7286 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7287 	rp->max_instances = hdev->le_num_of_adv_sets;
7288 	rp->num_instances = hdev->adv_instance_cnt;
7289 
7290 	instance = rp->instance;
7291 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7292 		*instance = adv_instance->instance;
7293 		instance++;
7294 	}
7295 
7296 	hci_dev_unlock(hdev);
7297 
7298 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7299 				MGMT_STATUS_SUCCESS, rp, rp_len);
7300 
7301 	kfree(rp);
7302 
7303 	return err;
7304 }
7305 
7306 static u8 calculate_name_len(struct hci_dev *hdev)
7307 {
7308 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7309 
7310 	return append_local_name(hdev, buf, 0);
7311 }
7312 
7313 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7314 			   bool is_adv_data)
7315 {
7316 	u8 max_len = HCI_MAX_AD_LENGTH;
7317 
7318 	if (is_adv_data) {
7319 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7320 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7321 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7322 			max_len -= 3;
7323 
7324 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7325 			max_len -= 3;
7326 	} else {
7327 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7328 			max_len -= calculate_name_len(hdev);
7329 
7330 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7331 			max_len -= 4;
7332 	}
7333 
7334 	return max_len;
7335 }
7336 
7337 static bool flags_managed(u32 adv_flags)
7338 {
7339 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7340 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7341 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7342 }
7343 
7344 static bool tx_power_managed(u32 adv_flags)
7345 {
7346 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7347 }
7348 
7349 static bool name_managed(u32 adv_flags)
7350 {
7351 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7352 }
7353 
7354 static bool appearance_managed(u32 adv_flags)
7355 {
7356 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7357 }
7358 
7359 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7360 			      u8 len, bool is_adv_data)
7361 {
7362 	int i, cur_len;
7363 	u8 max_len;
7364 
7365 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7366 
7367 	if (len > max_len)
7368 		return false;
7369 
7370 	/* Make sure that the data is correctly formatted. */
7371 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7372 		cur_len = data[i];
7373 
7374 		if (data[i + 1] == EIR_FLAGS &&
7375 		    (!is_adv_data || flags_managed(adv_flags)))
7376 			return false;
7377 
7378 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7379 			return false;
7380 
7381 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7382 			return false;
7383 
7384 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7385 			return false;
7386 
7387 		if (data[i + 1] == EIR_APPEARANCE &&
7388 		    appearance_managed(adv_flags))
7389 			return false;
7390 
7391 		/* If the current field length would exceed the total data
7392 		 * length, then it's invalid.
7393 		 */
7394 		if (i + cur_len >= len)
7395 			return false;
7396 	}
7397 
7398 	return true;
7399 }
7400 
7401 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7402 {
7403 	u32 supported_flags, phy_flags;
7404 
7405 	/* The current implementation only supports a subset of the specified
7406 	 * flags. Also need to check mutual exclusiveness of sec flags.
7407 	 */
7408 	supported_flags = get_supported_adv_flags(hdev);
7409 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7410 	if (adv_flags & ~supported_flags ||
7411 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7412 		return false;
7413 
7414 	return true;
7415 }
7416 
7417 static bool adv_busy(struct hci_dev *hdev)
7418 {
7419 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7420 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7421 		pending_find(MGMT_OP_SET_LE, hdev) ||
7422 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7423 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7424 }
7425 
/* HCI request completion handler for Add Advertising / Add Extended
 * Advertising Data.
 *
 * On failure, every still-pending advertising instance is removed and
 * an Advertising Removed event is emitted for each; on success the
 * pending markers are cleared. Finally the pending management command,
 * if any, is completed with the mapped status.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two opcodes may have triggered this request. */
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer before removing the instance it
		 * refers to.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7479 
7480 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7481 			   void *data, u16 data_len)
7482 {
7483 	struct mgmt_cp_add_advertising *cp = data;
7484 	struct mgmt_rp_add_advertising rp;
7485 	u32 flags;
7486 	u8 status;
7487 	u16 timeout, duration;
7488 	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7489 	u8 schedule_instance = 0;
7490 	struct adv_info *next_instance;
7491 	int err;
7492 	struct mgmt_pending_cmd *cmd;
7493 	struct hci_request req;
7494 
7495 	bt_dev_dbg(hdev, "sock %p", sk);
7496 
7497 	status = mgmt_le_support(hdev);
7498 	if (status)
7499 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7500 				       status);
7501 
7502 	/* Enabling the experimental LL Privay support disables support for
7503 	 * advertising.
7504 	 */
7505 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7506 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7507 				       MGMT_STATUS_NOT_SUPPORTED);
7508 
7509 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7510 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7511 				       MGMT_STATUS_INVALID_PARAMS);
7512 
7513 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7514 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7515 				       MGMT_STATUS_INVALID_PARAMS);
7516 
7517 	flags = __le32_to_cpu(cp->flags);
7518 	timeout = __le16_to_cpu(cp->timeout);
7519 	duration = __le16_to_cpu(cp->duration);
7520 
7521 	if (!requested_adv_flags_are_valid(hdev, flags))
7522 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7523 				       MGMT_STATUS_INVALID_PARAMS);
7524 
7525 	hci_dev_lock(hdev);
7526 
7527 	if (timeout && !hdev_is_powered(hdev)) {
7528 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7529 				      MGMT_STATUS_REJECTED);
7530 		goto unlock;
7531 	}
7532 
7533 	if (adv_busy(hdev)) {
7534 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7535 				      MGMT_STATUS_BUSY);
7536 		goto unlock;
7537 	}
7538 
7539 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7540 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7541 			       cp->scan_rsp_len, false)) {
7542 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7543 				      MGMT_STATUS_INVALID_PARAMS);
7544 		goto unlock;
7545 	}
7546 
7547 	err = hci_add_adv_instance(hdev, cp->instance, flags,
7548 				   cp->adv_data_len, cp->data,
7549 				   cp->scan_rsp_len,
7550 				   cp->data + cp->adv_data_len,
7551 				   timeout, duration,
7552 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
7553 				   hdev->le_adv_min_interval,
7554 				   hdev->le_adv_max_interval);
7555 	if (err < 0) {
7556 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7557 				      MGMT_STATUS_FAILED);
7558 		goto unlock;
7559 	}
7560 
7561 	/* Only trigger an advertising added event if a new instance was
7562 	 * actually added.
7563 	 */
7564 	if (hdev->adv_instance_cnt > prev_instance_cnt)
7565 		mgmt_advertising_added(sk, hdev, cp->instance);
7566 
7567 	if (hdev->cur_adv_instance == cp->instance) {
7568 		/* If the currently advertised instance is being changed then
7569 		 * cancel the current advertising and schedule the next
7570 		 * instance. If there is only one instance then the overridden
7571 		 * advertising data will be visible right away.
7572 		 */
7573 		cancel_adv_timeout(hdev);
7574 
7575 		next_instance = hci_get_next_instance(hdev, cp->instance);
7576 		if (next_instance)
7577 			schedule_instance = next_instance->instance;
7578 	} else if (!hdev->adv_instance_timeout) {
7579 		/* Immediately advertise the new instance if no other
7580 		 * instance is currently being advertised.
7581 		 */
7582 		schedule_instance = cp->instance;
7583 	}
7584 
7585 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
7586 	 * there is no instance to be advertised then we have no HCI
7587 	 * communication to make. Simply return.
7588 	 */
7589 	if (!hdev_is_powered(hdev) ||
7590 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7591 	    !schedule_instance) {
7592 		rp.instance = cp->instance;
7593 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7594 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7595 		goto unlock;
7596 	}
7597 
7598 	/* We're good to go, update advertising data, parameters, and start
7599 	 * advertising.
7600 	 */
7601 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7602 			       data_len);
7603 	if (!cmd) {
7604 		err = -ENOMEM;
7605 		goto unlock;
7606 	}
7607 
7608 	hci_req_init(&req, hdev);
7609 
7610 	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7611 
7612 	if (!err)
7613 		err = hci_req_run(&req, add_advertising_complete);
7614 
7615 	if (err < 0) {
7616 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7617 				      MGMT_STATUS_FAILED);
7618 		mgmt_pending_remove(cmd);
7619 	}
7620 
7621 unlock:
7622 	hci_dev_unlock(hdev);
7623 
7624 	return err;
7625 }
7626 
/* Completion handler for the MGMT_OP_ADD_EXT_ADV_PARAMS HCI request.
 *
 * Answers the pending command with the instance's tx power and the
 * remaining data capacity on success; on failure the half-configured
 * instance is removed (with a removal event if it had previously been
 * advertising) and a status-only reply is sent.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_ext_adv_params *cp;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv_instance;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	/* The instance was created by add_ext_adv_params() before this
	 * request ran, but may be gone if it was removed meanwhile.
	 */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);
	if (!adv_instance)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv_instance->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (status) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv_instance->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));
	}

unlock:
	/* cmd may be NULL when no command was pending */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
7683 
7684 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
7685 			      void *data, u16 data_len)
7686 {
7687 	struct mgmt_cp_add_ext_adv_params *cp = data;
7688 	struct mgmt_rp_add_ext_adv_params rp;
7689 	struct mgmt_pending_cmd *cmd = NULL;
7690 	struct adv_info *adv_instance;
7691 	struct hci_request req;
7692 	u32 flags, min_interval, max_interval;
7693 	u16 timeout, duration;
7694 	u8 status;
7695 	s8 tx_power;
7696 	int err;
7697 
7698 	BT_DBG("%s", hdev->name);
7699 
7700 	status = mgmt_le_support(hdev);
7701 	if (status)
7702 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7703 				       status);
7704 
7705 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7706 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7707 				       MGMT_STATUS_INVALID_PARAMS);
7708 
7709 	/* The purpose of breaking add_advertising into two separate MGMT calls
7710 	 * for params and data is to allow more parameters to be added to this
7711 	 * structure in the future. For this reason, we verify that we have the
7712 	 * bare minimum structure we know of when the interface was defined. Any
7713 	 * extra parameters we don't know about will be ignored in this request.
7714 	 */
7715 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
7716 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7717 				       MGMT_STATUS_INVALID_PARAMS);
7718 
7719 	flags = __le32_to_cpu(cp->flags);
7720 
7721 	if (!requested_adv_flags_are_valid(hdev, flags))
7722 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7723 				       MGMT_STATUS_INVALID_PARAMS);
7724 
7725 	hci_dev_lock(hdev);
7726 
7727 	/* In new interface, we require that we are powered to register */
7728 	if (!hdev_is_powered(hdev)) {
7729 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7730 				      MGMT_STATUS_REJECTED);
7731 		goto unlock;
7732 	}
7733 
7734 	if (adv_busy(hdev)) {
7735 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7736 				      MGMT_STATUS_BUSY);
7737 		goto unlock;
7738 	}
7739 
7740 	/* Parse defined parameters from request, use defaults otherwise */
7741 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
7742 		  __le16_to_cpu(cp->timeout) : 0;
7743 
7744 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
7745 		   __le16_to_cpu(cp->duration) :
7746 		   hdev->def_multi_adv_rotation_duration;
7747 
7748 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7749 		       __le32_to_cpu(cp->min_interval) :
7750 		       hdev->le_adv_min_interval;
7751 
7752 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7753 		       __le32_to_cpu(cp->max_interval) :
7754 		       hdev->le_adv_max_interval;
7755 
7756 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
7757 		   cp->tx_power :
7758 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
7759 
7760 	/* Create advertising instance with no advertising or response data */
7761 	err = hci_add_adv_instance(hdev, cp->instance, flags,
7762 				   0, NULL, 0, NULL, timeout, duration,
7763 				   tx_power, min_interval, max_interval);
7764 
7765 	if (err < 0) {
7766 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7767 				      MGMT_STATUS_FAILED);
7768 		goto unlock;
7769 	}
7770 
7771 	hdev->cur_adv_instance = cp->instance;
7772 	/* Submit request for advertising params if ext adv available */
7773 	if (ext_adv_capable(hdev)) {
7774 		hci_req_init(&req, hdev);
7775 		adv_instance = hci_find_adv_instance(hdev, cp->instance);
7776 
7777 		/* Updating parameters of an active instance will return a
7778 		 * Command Disallowed error, so we must first disable the
7779 		 * instance if it is active.
7780 		 */
7781 		if (!adv_instance->pending)
7782 			__hci_req_disable_ext_adv_instance(&req, cp->instance);
7783 
7784 		__hci_req_setup_ext_adv_instance(&req, cp->instance);
7785 
7786 		err = hci_req_run(&req, add_ext_adv_params_complete);
7787 
7788 		if (!err)
7789 			cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
7790 					       hdev, data, data_len);
7791 		if (!cmd) {
7792 			err = -ENOMEM;
7793 			hci_remove_adv_instance(hdev, cp->instance);
7794 			goto unlock;
7795 		}
7796 
7797 	} else {
7798 		rp.instance = cp->instance;
7799 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
7800 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7801 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7802 		err = mgmt_cmd_complete(sk, hdev->id,
7803 					MGMT_OP_ADD_EXT_ADV_PARAMS,
7804 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7805 	}
7806 
7807 unlock:
7808 	hci_dev_unlock(hdev);
7809 
7810 	return err;
7811 }
7812 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Attaches advertising and scan-response data to an instance that was
 * created by MGMT_OP_ADD_EXT_ADV_PARAMS and kicks off advertising. On
 * failure before the instance is committed the instance is torn down
 * again (clear_new_instance path). The response is sent immediately
 * when no HCI traffic is needed, otherwise deferred to
 * add_advertising_complete().
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must already exist (created by ADD_EXT_ADV_PARAMS) */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	if (ext_adv_capable(hdev)) {
		/* Extended advertising: push data and enable directly */
		__hci_req_update_adv_data(&req, cp->instance);
		__hci_req_update_scan_rsp_data(&req, cp->instance);
		__hci_req_enable_ext_advertising(&req, cp->instance);

	} else {
		/* If using software rotation, determine next instance to use */

		if (hdev->cur_adv_instance == cp->instance) {
			/* If the currently advertised instance is being changed
			 * then cancel the current advertising and schedule the
			 * next instance. If there is only one instance then the
			 * overridden advertising data will be visible right
			 * away
			 */
			cancel_adv_timeout(hdev);

			next_instance = hci_get_next_instance(hdev,
							      cp->instance);
			if (next_instance)
				schedule_instance = next_instance->instance;
		} else if (!hdev->adv_instance_timeout) {
			/* Immediately advertise the new instance if no other
			 * instance is currently being advertised.
			 */
			schedule_instance = cp->instance;
		}

		/* If the HCI_ADVERTISING flag is set or there is no instance to
		 * be advertised then we have no HCI communication to make.
		 * Simply return.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    !schedule_instance) {
			/* Commit the instance and announce it now, since no
			 * completion handler will run for this command.
			 */
			if (adv_instance->pending) {
				mgmt_advertising_added(sk, hdev, cp->instance);
				adv_instance->pending = false;
			}
			rp.instance = cp->instance;
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_EXT_ADV_DATA,
						MGMT_STATUS_SUCCESS, &rp,
						sizeof(rp));
			goto unlock;
		}

		err = __hci_req_schedule_adv_instance(&req, schedule_instance,
						      true);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7958 
7959 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7960 					u16 opcode)
7961 {
7962 	struct mgmt_pending_cmd *cmd;
7963 	struct mgmt_cp_remove_advertising *cp;
7964 	struct mgmt_rp_remove_advertising rp;
7965 
7966 	bt_dev_dbg(hdev, "status %d", status);
7967 
7968 	hci_dev_lock(hdev);
7969 
7970 	/* A failure status here only means that we failed to disable
7971 	 * advertising. Otherwise, the advertising instance has been removed,
7972 	 * so report success.
7973 	 */
7974 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7975 	if (!cmd)
7976 		goto unlock;
7977 
7978 	cp = cmd->param;
7979 	rp.instance = cp->instance;
7980 
7981 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7982 			  &rp, sizeof(rp));
7983 	mgmt_pending_remove(cmd);
7984 
7985 unlock:
7986 	hci_dev_unlock(hdev);
7987 }
7988 
7989 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7990 			      void *data, u16 data_len)
7991 {
7992 	struct mgmt_cp_remove_advertising *cp = data;
7993 	struct mgmt_rp_remove_advertising rp;
7994 	struct mgmt_pending_cmd *cmd;
7995 	struct hci_request req;
7996 	int err;
7997 
7998 	bt_dev_dbg(hdev, "sock %p", sk);
7999 
8000 	/* Enabling the experimental LL Privay support disables support for
8001 	 * advertising.
8002 	 */
8003 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8004 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8005 				       MGMT_STATUS_NOT_SUPPORTED);
8006 
8007 	hci_dev_lock(hdev);
8008 
8009 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8010 		err = mgmt_cmd_status(sk, hdev->id,
8011 				      MGMT_OP_REMOVE_ADVERTISING,
8012 				      MGMT_STATUS_INVALID_PARAMS);
8013 		goto unlock;
8014 	}
8015 
8016 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8017 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8018 	    pending_find(MGMT_OP_SET_LE, hdev)) {
8019 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8020 				      MGMT_STATUS_BUSY);
8021 		goto unlock;
8022 	}
8023 
8024 	if (list_empty(&hdev->adv_instances)) {
8025 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8026 				      MGMT_STATUS_INVALID_PARAMS);
8027 		goto unlock;
8028 	}
8029 
8030 	hci_req_init(&req, hdev);
8031 
8032 	/* If we use extended advertising, instance is disabled and removed */
8033 	if (ext_adv_capable(hdev)) {
8034 		__hci_req_disable_ext_adv_instance(&req, cp->instance);
8035 		__hci_req_remove_ext_adv_instance(&req, cp->instance);
8036 	}
8037 
8038 	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8039 
8040 	if (list_empty(&hdev->adv_instances))
8041 		__hci_req_disable_advertising(&req);
8042 
8043 	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
8044 	 * flag is set or the device isn't powered then we have no HCI
8045 	 * communication to make. Simply return.
8046 	 */
8047 	if (skb_queue_empty(&req.cmd_q) ||
8048 	    !hdev_is_powered(hdev) ||
8049 	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8050 		hci_req_purge(&req);
8051 		rp.instance = cp->instance;
8052 		err = mgmt_cmd_complete(sk, hdev->id,
8053 					MGMT_OP_REMOVE_ADVERTISING,
8054 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8055 		goto unlock;
8056 	}
8057 
8058 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8059 			       data_len);
8060 	if (!cmd) {
8061 		err = -ENOMEM;
8062 		goto unlock;
8063 	}
8064 
8065 	err = hci_req_run(&req, remove_advertising_complete);
8066 	if (err < 0)
8067 		mgmt_pending_remove(cmd);
8068 
8069 unlock:
8070 	hci_dev_unlock(hdev);
8071 
8072 	return err;
8073 }
8074 
8075 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8076 			     void *data, u16 data_len)
8077 {
8078 	struct mgmt_cp_get_adv_size_info *cp = data;
8079 	struct mgmt_rp_get_adv_size_info rp;
8080 	u32 flags, supported_flags;
8081 	int err;
8082 
8083 	bt_dev_dbg(hdev, "sock %p", sk);
8084 
8085 	if (!lmp_le_capable(hdev))
8086 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8087 				       MGMT_STATUS_REJECTED);
8088 
8089 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8090 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8091 				       MGMT_STATUS_INVALID_PARAMS);
8092 
8093 	flags = __le32_to_cpu(cp->flags);
8094 
8095 	/* The current implementation only supports a subset of the specified
8096 	 * flags.
8097 	 */
8098 	supported_flags = get_supported_adv_flags(hdev);
8099 	if (flags & ~supported_flags)
8100 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8101 				       MGMT_STATUS_INVALID_PARAMS);
8102 
8103 	rp.instance = cp->instance;
8104 	rp.flags = cp->flags;
8105 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8106 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8107 
8108 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8109 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8110 
8111 	return err;
8112 }
8113 
/* Dispatch table for mgmt commands, indexed by opcode (entry 0x0000 is
 * unused). Each entry gives the handler, the expected parameter size
 * (the minimum size when HCI_MGMT_VAR_LEN is set) and optional
 * HCI_MGMT_* flags controlling hdev binding and trust requirements.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
};
8238 
/* Announce a newly registered controller to mgmt listeners.
 *
 * Emits the legacy (un)configured index-added event plus the extended
 * index-added event carrying the controller type and bus. Raw-only
 * devices and unknown device types produce no events.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only controllers are never exposed over mgmt */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8270 
/* Announce controller removal to mgmt listeners.
 *
 * For primary controllers all pending mgmt commands are first failed
 * with INVALID_INDEX, then the matching legacy index-removed event and
 * the extended index-removed event are emitted. Raw-only devices and
 * unknown device types produce no events.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw-only controllers were never exposed over mgmt */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail every pending command (opcode 0 = all opcodes) */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8305 
8306 /* This function requires the caller holds hdev->lock */
8307 static void restart_le_actions(struct hci_dev *hdev)
8308 {
8309 	struct hci_conn_params *p;
8310 
8311 	list_for_each_entry(p, &hdev->le_conn_params, list) {
8312 		/* Needed for AUTO_OFF case where might not "really"
8313 		 * have been powered off.
8314 		 */
8315 		list_del_init(&p->action);
8316 
8317 		switch (p->auto_connect) {
8318 		case HCI_AUTO_CONN_DIRECT:
8319 		case HCI_AUTO_CONN_ALWAYS:
8320 			list_add(&p->action, &hdev->pend_le_conns);
8321 			break;
8322 		case HCI_AUTO_CONN_REPORT:
8323 			list_add(&p->action, &hdev->pend_le_reports);
8324 			break;
8325 		default:
8326 			break;
8327 		}
8328 	}
8329 }
8330 
/* Finish a power-on attempt: on success re-arm LE auto-connect actions
 * and background scanning, then answer all pending SET_POWERED commands
 * and broadcast the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* settings_rsp stashes one requester socket in match.sk so the
	 * new_settings broadcast below can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8353 
/* Handle controller power-off: answer pending SET_POWERED commands,
 * fail all other pending commands with an appropriate status, announce
 * a zero class of device if needed and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command (opcode 0 = all opcodes) */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8387 
8388 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8389 {
8390 	struct mgmt_pending_cmd *cmd;
8391 	u8 status;
8392 
8393 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8394 	if (!cmd)
8395 		return;
8396 
8397 	if (err == -ERFKILL)
8398 		status = MGMT_STATUS_RFKILLED;
8399 	else
8400 		status = MGMT_STATUS_FAILED;
8401 
8402 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8403 
8404 	mgmt_pending_remove(cmd);
8405 }
8406 
8407 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8408 		       bool persistent)
8409 {
8410 	struct mgmt_ev_new_link_key ev;
8411 
8412 	memset(&ev, 0, sizeof(ev));
8413 
8414 	ev.store_hint = persistent;
8415 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8416 	ev.key.addr.type = BDADDR_BREDR;
8417 	ev.key.type = key->type;
8418 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8419 	ev.key.pin_len = key->pin_len;
8420 
8421 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8422 }
8423 
8424 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8425 {
8426 	switch (ltk->type) {
8427 	case SMP_LTK:
8428 	case SMP_LTK_SLAVE:
8429 		if (ltk->authenticated)
8430 			return MGMT_LTK_AUTHENTICATED;
8431 		return MGMT_LTK_UNAUTHENTICATED;
8432 	case SMP_LTK_P256:
8433 		if (ltk->authenticated)
8434 			return MGMT_LTK_P256_AUTH;
8435 		return MGMT_LTK_P256_UNAUTH;
8436 	case SMP_LTK_P256_DEBUG:
8437 		return MGMT_LTK_P256_DEBUG;
8438 	}
8439 
8440 	return MGMT_LTK_UNAUTHENTICATED;
8441 }
8442 
8443 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8444 {
8445 	struct mgmt_ev_new_long_term_key ev;
8446 
8447 	memset(&ev, 0, sizeof(ev));
8448 
8449 	/* Devices using resolvable or non-resolvable random addresses
8450 	 * without providing an identity resolving key don't require
8451 	 * to store long term keys. Their addresses will change the
8452 	 * next time around.
8453 	 *
8454 	 * Only when a remote device provides an identity address
8455 	 * make sure the long term key is stored. If the remote
8456 	 * identity is known, the long term keys are internally
8457 	 * mapped to the identity address. So allow static random
8458 	 * and public addresses here.
8459 	 */
8460 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8461 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
8462 		ev.store_hint = 0x00;
8463 	else
8464 		ev.store_hint = persistent;
8465 
8466 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8467 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8468 	ev.key.type = mgmt_ltk_type(key);
8469 	ev.key.enc_size = key->enc_size;
8470 	ev.key.ediv = key->ediv;
8471 	ev.key.rand = key->rand;
8472 
8473 	if (key->type == SMP_LTK)
8474 		ev.key.master = 1;
8475 
8476 	/* Make sure we copy only the significant bytes based on the
8477 	 * encryption key size, and set the rest of the value to zeroes.
8478 	 */
8479 	memcpy(ev.key.val, key->val, key->enc_size);
8480 	memset(ev.key.val + key->enc_size, 0,
8481 	       sizeof(ev.key.val) - key->enc_size);
8482 
8483 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
8484 }
8485 
8486 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8487 {
8488 	struct mgmt_ev_new_irk ev;
8489 
8490 	memset(&ev, 0, sizeof(ev));
8491 
8492 	ev.store_hint = persistent;
8493 
8494 	bacpy(&ev.rpa, &irk->rpa);
8495 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8496 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8497 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8498 
8499 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8500 }
8501 
8502 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8503 		   bool persistent)
8504 {
8505 	struct mgmt_ev_new_csrk ev;
8506 
8507 	memset(&ev, 0, sizeof(ev));
8508 
8509 	/* Devices using resolvable or non-resolvable random addresses
8510 	 * without providing an identity resolving key don't require
8511 	 * to store signature resolving keys. Their addresses will change
8512 	 * the next time around.
8513 	 *
8514 	 * Only when a remote device provides an identity address
8515 	 * make sure the signature resolving key is stored. So allow
8516 	 * static random and public addresses here.
8517 	 */
8518 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8519 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8520 		ev.store_hint = 0x00;
8521 	else
8522 		ev.store_hint = persistent;
8523 
8524 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8525 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8526 	ev.key.type = csrk->type;
8527 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8528 
8529 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
8530 }
8531 
8532 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8533 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8534 			 u16 max_interval, u16 latency, u16 timeout)
8535 {
8536 	struct mgmt_ev_new_conn_param ev;
8537 
8538 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
8539 		return;
8540 
8541 	memset(&ev, 0, sizeof(ev));
8542 	bacpy(&ev.addr.bdaddr, bdaddr);
8543 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8544 	ev.store_hint = store_hint;
8545 	ev.min_interval = cpu_to_le16(min_interval);
8546 	ev.max_interval = cpu_to_le16(max_interval);
8547 	ev.latency = cpu_to_le16(latency);
8548 	ev.timeout = cpu_to_le16(timeout);
8549 
8550 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8551 }
8552 
/* Send a Device Connected event to user space. For LE connections the
 * peer's advertising data is used as the event's EIR payload; for
 * BR/EDR the remote name and class of device are appended instead
 * (never both, to keep EIR fields unique).
 *
 * NOTE(review): data is copied into the 512 byte stack buffer without
 * an explicit bounds check here; assumes name_len and le_adv_data_len
 * are bounded by the callers - confirm.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if one is set */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8589 
8590 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8591 {
8592 	struct sock **sk = data;
8593 
8594 	cmd->cmd_complete(cmd, 0);
8595 
8596 	*sk = cmd->sk;
8597 	sock_hold(*sk);
8598 
8599 	mgmt_pending_remove(cmd);
8600 }
8601 
8602 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8603 {
8604 	struct hci_dev *hdev = data;
8605 	struct mgmt_cp_unpair_device *cp = cmd->param;
8606 
8607 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8608 
8609 	cmd->cmd_complete(cmd, 0);
8610 	mgmt_pending_remove(cmd);
8611 }
8612 
8613 bool mgmt_powering_down(struct hci_dev *hdev)
8614 {
8615 	struct mgmt_pending_cmd *cmd;
8616 	struct mgmt_mode *cp;
8617 
8618 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8619 	if (!cmd)
8620 		return false;
8621 
8622 	cp = cmd->param;
8623 	if (!cp->val)
8624 		return true;
8625 
8626 	return false;
8627 }
8628 
/* Handle a terminated connection: fast-track a pending power off when
 * this was the last connection, answer pending Disconnect and Unpair
 * Device commands and emit a Device Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections user space was told about get the event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect command; its socket (with a
	 * held reference) is captured for the event emission below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8668 
8669 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8670 			    u8 link_type, u8 addr_type, u8 status)
8671 {
8672 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8673 	struct mgmt_cp_disconnect *cp;
8674 	struct mgmt_pending_cmd *cmd;
8675 
8676 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8677 			     hdev);
8678 
8679 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8680 	if (!cmd)
8681 		return;
8682 
8683 	cp = cmd->param;
8684 
8685 	if (bacmp(bdaddr, &cp->addr.bdaddr))
8686 		return;
8687 
8688 	if (cp->addr.type != bdaddr_type)
8689 		return;
8690 
8691 	cmd->cmd_complete(cmd, mgmt_status(status));
8692 	mgmt_pending_remove(cmd);
8693 }
8694 
8695 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8696 			 u8 addr_type, u8 status)
8697 {
8698 	struct mgmt_ev_connect_failed ev;
8699 
8700 	/* The connection is still in hci_conn_hash so test for 1
8701 	 * instead of 0 to know if this is the last one.
8702 	 */
8703 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8704 		cancel_delayed_work(&hdev->power_off);
8705 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
8706 	}
8707 
8708 	bacpy(&ev.addr.bdaddr, bdaddr);
8709 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8710 	ev.status = mgmt_status(status);
8711 
8712 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8713 }
8714 
8715 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8716 {
8717 	struct mgmt_ev_pin_code_request ev;
8718 
8719 	bacpy(&ev.addr.bdaddr, bdaddr);
8720 	ev.addr.type = BDADDR_BREDR;
8721 	ev.secure = secure;
8722 
8723 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8724 }
8725 
8726 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8727 				  u8 status)
8728 {
8729 	struct mgmt_pending_cmd *cmd;
8730 
8731 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8732 	if (!cmd)
8733 		return;
8734 
8735 	cmd->cmd_complete(cmd, mgmt_status(status));
8736 	mgmt_pending_remove(cmd);
8737 }
8738 
8739 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8740 				      u8 status)
8741 {
8742 	struct mgmt_pending_cmd *cmd;
8743 
8744 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8745 	if (!cmd)
8746 		return;
8747 
8748 	cmd->cmd_complete(cmd, mgmt_status(status));
8749 	mgmt_pending_remove(cmd);
8750 }
8751 
8752 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8753 			      u8 link_type, u8 addr_type, u32 value,
8754 			      u8 confirm_hint)
8755 {
8756 	struct mgmt_ev_user_confirm_request ev;
8757 
8758 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8759 
8760 	bacpy(&ev.addr.bdaddr, bdaddr);
8761 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8762 	ev.confirm_hint = confirm_hint;
8763 	ev.value = cpu_to_le32(value);
8764 
8765 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8766 			  NULL);
8767 }
8768 
8769 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8770 			      u8 link_type, u8 addr_type)
8771 {
8772 	struct mgmt_ev_user_passkey_request ev;
8773 
8774 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8775 
8776 	bacpy(&ev.addr.bdaddr, bdaddr);
8777 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8778 
8779 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8780 			  NULL);
8781 }
8782 
8783 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8784 				      u8 link_type, u8 addr_type, u8 status,
8785 				      u8 opcode)
8786 {
8787 	struct mgmt_pending_cmd *cmd;
8788 
8789 	cmd = pending_find(opcode, hdev);
8790 	if (!cmd)
8791 		return -ENOENT;
8792 
8793 	cmd->cmd_complete(cmd, mgmt_status(status));
8794 	mgmt_pending_remove(cmd);
8795 
8796 	return 0;
8797 }
8798 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8805 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8813 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8820 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8828 
8829 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8830 			     u8 link_type, u8 addr_type, u32 passkey,
8831 			     u8 entered)
8832 {
8833 	struct mgmt_ev_passkey_notify ev;
8834 
8835 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8836 
8837 	bacpy(&ev.addr.bdaddr, bdaddr);
8838 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8839 	ev.passkey = __cpu_to_le32(passkey);
8840 	ev.entered = entered;
8841 
8842 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8843 }
8844 
/* Report an authentication failure to user space and complete any
 * pending pairing command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	/* Complete the pairing command only after the event was sent */
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8865 
/* Handle completion of the HCI authentication enable command: on error
 * fail pending Set Link Security commands; otherwise sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state and
 * notify user space if the setting actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the dev flag;
	 * "changed" is true only when the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8892 
8893 static void clear_eir(struct hci_request *req)
8894 {
8895 	struct hci_dev *hdev = req->hdev;
8896 	struct hci_cp_write_eir cp;
8897 
8898 	if (!lmp_ext_inq_capable(hdev))
8899 		return;
8900 
8901 	memset(hdev->eir, 0, sizeof(hdev->eir));
8902 
8903 	memset(&cp, 0, sizeof(cp));
8904 
8905 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8906 }
8907 
/* Handle completion of the HCI Write SSP Mode command: update the
 * HCI_SSP_ENABLED/HCI_HS_ENABLED flags, respond to pending Set SSP
 * commands and refresh (or clear) the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable with the flag already set needs the
		 * flags rolled back and user space notified.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also clears HS; "changed" must end up
		 * true if either flag was actually cleared.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, keep the EIR (and SSP debug mode) current; with
	 * SSP off, clear the EIR data.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8960 
8961 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8962 {
8963 	struct cmd_lookup *match = data;
8964 
8965 	if (match->sk == NULL) {
8966 		match->sk = cmd->sk;
8967 		sock_hold(match->sk);
8968 	}
8969 }
8970 
8971 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8972 				    u8 status)
8973 {
8974 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8975 
8976 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8977 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8978 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8979 
8980 	if (!status) {
8981 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
8982 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8983 		ext_info_changed(hdev, NULL);
8984 	}
8985 
8986 	if (match.sk)
8987 		sock_put(match.sk);
8988 }
8989 
/* Handle completion of a local name change: update the cached name if
 * the change did not originate from a mgmt command, and emit a Local
 * Name Changed event (suppressed while powering on).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending Set Local Name command: the change came
		 * from outside mgmt, so keep the cached copy in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9017 
9018 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9019 {
9020 	int i;
9021 
9022 	for (i = 0; i < uuid_count; i++) {
9023 		if (!memcmp(uuid, uuids[i], 16))
9024 			return true;
9025 	}
9026 
9027 	return false;
9028 }
9029 
/* Walk the EIR/advertising data in @eir and return true if any UUID
 * listed there (16, 32 or 128 bit, expanded against the Bluetooth base
 * UUID) occurs in the @uuids filter array.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length octet terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Expand each little-endian 16-bit UUID into
			 * octets 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs occupy octets 12-15 of the base */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length octet + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9084 
/* Schedule a restart of an ongoing LE scan so that devices suppressed
 * by the controller's duplicate filter get reported again.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the scan would end before the restart
	 * delay elapses anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9099 
/* Apply the service discovery filter (RSSI threshold and UUID list) to
 * a single scan result. Returns false when the result must be dropped.
 * May trigger an LE scan restart as a side effect (see below).
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9144 
/* Forward a discovered device to user space as a Device Found event.
 * Discovery state, the service discovery filter and the limited
 * discoverable check are applied first; the EIR data, an optional
 * synthesized class of device field and the scan response data are
 * concatenated into the event payload.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Synthesize a class of device field unless the EIR data
	 * already carries one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9229 
9230 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9231 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9232 {
9233 	struct mgmt_ev_device_found *ev;
9234 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9235 	u16 eir_len;
9236 
9237 	ev = (struct mgmt_ev_device_found *) buf;
9238 
9239 	memset(buf, 0, sizeof(buf));
9240 
9241 	bacpy(&ev->addr.bdaddr, bdaddr);
9242 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9243 	ev->rssi = rssi;
9244 
9245 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9246 				  name_len);
9247 
9248 	ev->eir_len = cpu_to_le16(eir_len);
9249 
9250 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9251 }
9252 
9253 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9254 {
9255 	struct mgmt_ev_discovering ev;
9256 
9257 	bt_dev_dbg(hdev, "discovering %u", discovering);
9258 
9259 	memset(&ev, 0, sizeof(ev));
9260 	ev.type = hdev->discovery.type;
9261 	ev.discovering = discovering;
9262 
9263 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9264 }
9265 
9266 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9267 {
9268 	struct mgmt_ev_controller_suspend ev;
9269 
9270 	ev.suspend_state = state;
9271 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9272 }
9273 
9274 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9275 		   u8 addr_type)
9276 {
9277 	struct mgmt_ev_controller_resume ev;
9278 
9279 	ev.wake_reason = reason;
9280 	if (bdaddr) {
9281 		bacpy(&ev.addr.bdaddr, bdaddr);
9282 		ev.addr.type = addr_type;
9283 	} else {
9284 		memset(&ev.addr, 0, sizeof(ev.addr));
9285 	}
9286 
9287 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9288 }
9289 
/* The mgmt control channel: routes commands arriving on
 * HCI_CHANNEL_CONTROL sockets to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9296 
/* Register the mgmt control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9301 
/* Unregister the mgmt control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9306