xref: /linux/net/bluetooth/mgmt.c (revision af873fcecef567abf8a3468b06dd4e4aab46da6d)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	14
42 
/* Management opcodes available to trusted (privileged) control sockets.
 * This list is reported verbatim in the Read Commands reply for trusted
 * sockets (see read_commands() below).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
};
110 
/* Management events delivered to trusted control sockets. Reported
 * verbatim in the Read Commands reply for trusted sockets.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
148 
/* Read-only subset of opcodes allowed for untrusted (non-privileged)
 * sockets; used by read_commands() when HCI_SOCK_TRUSTED is not set.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};
157 
/* Events delivered to untrusted sockets; reported by read_commands()
 * when HCI_SOCK_TRUSTED is not set.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
171 
172 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
173 
174 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
175 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
176 
177 /* HCI to MGMT error code conversion table */
178 static u8 mgmt_status_table[] = {
179 	MGMT_STATUS_SUCCESS,
180 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
181 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
182 	MGMT_STATUS_FAILED,		/* Hardware Failure */
183 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
184 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
185 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
186 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
187 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
188 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
189 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
190 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
191 	MGMT_STATUS_BUSY,		/* Command Disallowed */
192 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
193 	MGMT_STATUS_REJECTED,		/* Rejected Security */
194 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
195 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
196 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
197 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
198 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
199 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
200 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
201 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
202 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
203 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
204 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
205 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
206 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
207 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
208 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
209 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
210 	MGMT_STATUS_FAILED,		/* Unspecified Error */
211 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
212 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
213 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
214 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
215 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
216 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
217 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
218 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
219 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
220 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
221 	MGMT_STATUS_FAILED,		/* Transaction Collision */
222 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
223 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
224 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
225 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
226 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
227 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
228 	MGMT_STATUS_FAILED,		/* Slot Violation */
229 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
230 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
231 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
232 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
233 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
234 	MGMT_STATUS_BUSY,		/* Controller Busy */
235 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
236 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
237 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
238 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
239 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
240 };
241 
242 static u8 mgmt_status(u8 hci_status)
243 {
244 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
245 		return mgmt_status_table[hci_status];
246 
247 	return MGMT_STATUS_FAILED;
248 }
249 
/* Send @event on the control channel to all sockets that have @flag
 * set (no socket is skipped).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
256 
/* Send @event on the control channel to sockets that have @flag set,
 * skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
263 
/* Send @event on the control channel to trusted sockets only,
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
270 
271 static u8 le_addr_type(u8 mgmt_addr_type)
272 {
273 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
274 		return ADDR_LE_DEV_PUBLIC;
275 	else
276 		return ADDR_LE_DEV_RANDOM;
277 }
278 
/* Fill a mgmt_rp_read_version structure with the compiled-in mgmt
 * interface version and revision. Non-static so it can be shared
 * outside this file.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
286 
/* Read Version command handler: reply with the mgmt interface
 * version/revision. Not tied to any controller index.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
299 
300 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
301 			 u16 data_len)
302 {
303 	struct mgmt_rp_read_commands *rp;
304 	u16 num_commands, num_events;
305 	size_t rp_size;
306 	int i, err;
307 
308 	BT_DBG("sock %p", sk);
309 
310 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
311 		num_commands = ARRAY_SIZE(mgmt_commands);
312 		num_events = ARRAY_SIZE(mgmt_events);
313 	} else {
314 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
315 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
316 	}
317 
318 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
319 
320 	rp = kmalloc(rp_size, GFP_KERNEL);
321 	if (!rp)
322 		return -ENOMEM;
323 
324 	rp->num_commands = cpu_to_le16(num_commands);
325 	rp->num_events = cpu_to_le16(num_events);
326 
327 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
328 		__le16 *opcode = rp->opcodes;
329 
330 		for (i = 0; i < num_commands; i++, opcode++)
331 			put_unaligned_le16(mgmt_commands[i], opcode);
332 
333 		for (i = 0; i < num_events; i++, opcode++)
334 			put_unaligned_le16(mgmt_events[i], opcode);
335 	} else {
336 		__le16 *opcode = rp->opcodes;
337 
338 		for (i = 0; i < num_commands; i++, opcode++)
339 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
340 
341 		for (i = 0; i < num_events; i++, opcode++)
342 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
343 	}
344 
345 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
346 				rp, rp_size);
347 	kfree(rp);
348 
349 	return err;
350 }
351 
/* Read Index List command handler: reply with the ids of all
 * configured primary controllers. Controllers still in setup/config,
 * user-channel controllers and raw-only devices are filtered out.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries, used only
	 * to size the allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in only the indexes that fully qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length: the second pass may have skipped devices. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
411 
/* Read Unconfigured Index List command handler: like read_index_list()
 * but reports only primary controllers that still have the
 * HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in only the indexes that fully qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length: the second pass may have skipped devices. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
471 
/* Read Extended Index List command handler: reports primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus information. Calling this
 * switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: we are still holding hci_dev_list_lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in only the entries that fully qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
545 
546 static bool is_configured(struct hci_dev *hdev)
547 {
548 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
549 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
550 		return false;
551 
552 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
553 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
554 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
555 		return false;
556 
557 	return true;
558 }
559 
/* Return the configuration options that are still outstanding for
 * @hdev, as a little-endian option bitmask. The conditions here mirror
 * is_configured(): a cleared result means the controller is configured.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not performed yet. */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address required but still unset. */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
575 
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping @skip (the command originator).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
583 
/* Complete @opcode for @sk with the controller's missing-options
 * bitmask as the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
591 
/* Read Controller Configuration Info command handler: reply with the
 * manufacturer id plus the supported and still-missing configuration
 * options for this controller.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported whenever the quirk is set. */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only possible with a set_bdaddr
	 * driver callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
619 
/* Build the bitmask of PHYs this controller supports, derived from its
 * BR/EDR LMP feature bits and LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR rates depend on nested capability bits: 2M first,
		 * then the 3M variants gated on 3M capability.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
671 
/* Build the bitmask of PHYs currently selected on this controller,
 * from the BR/EDR packet-type settings and the LE default TX/RX PHY
 * preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* NOTE(review): the EDR pkt_type bits are tested negated
		 * here, unlike the basic-rate bits above — presumably the
		 * EDR bits are exclusion ("shall not use") bits; confirm
		 * against the HCI packet-type definitions.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
734 
735 static u32 get_configurable_phys(struct hci_dev *hdev)
736 {
737 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
738 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
739 }
740 
/* Build the bitmask of mgmt settings this controller can support,
 * based on its BR/EDR and LE capabilities plus configuration hooks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These settings are available regardless of capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs at least a 1.2 controller. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is possible with either the external-config
	 * quirk or a driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
782 
/* Build the bitmask of settings currently active on this controller,
 * derived from the hdev flag bits.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
850 
/* Find a pending mgmt command on the control channel by opcode. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
855 
/* Find a pending mgmt command on the control channel by opcode and the
 * user-data pointer it was registered with.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
862 
863 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
864 {
865 	struct mgmt_pending_cmd *cmd;
866 
867 	/* If there's a pending mgmt command the flags will not yet have
868 	 * their final values, so check for this first.
869 	 */
870 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
871 	if (cmd) {
872 		struct mgmt_mode *cp = cmd->param;
873 		if (cp->val == 0x01)
874 			return LE_AD_GENERAL;
875 		else if (cp->val == 0x02)
876 			return LE_AD_LIMITED;
877 	} else {
878 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
879 			return LE_AD_LIMITED;
880 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
881 			return LE_AD_GENERAL;
882 	}
883 
884 	return 0;
885 }
886 
/* Return whether the controller is (about to be) connectable, taking a
 * pending Set Connectable command into account.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
903 
/* Delayed-work handler that flushes the service cache: once the cache
 * timeout expires, push updated EIR data and class of device to the
 * controller. Runs only if HCI_SERVICE_CACHE was still set.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test-and-clear ensures only one flush per cache period. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
924 
/* Delayed-work handler for resolvable private address expiry: mark the
 * RPA expired and, if advertising is enabled, restart advertising so a
 * fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing further to do unless we are currently advertising. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
949 
/* One-time per-controller mgmt initialization: set up the delayed
 * works and clear the bondable default. Idempotent via the HCI_MGMT
 * test-and-set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
965 
/* Read Controller Info command handler: reply with the controller's
 * address, version, manufacturer, supported/current settings, class of
 * device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
995 
/* Append class of device (BR/EDR only), appearance (LE only) and both
 * names to @eir in EIR TLV format; returns the number of bytes
 * written. The caller must provide a sufficiently large buffer.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1019 
/* Read Extended Controller Info command handler: like Read Info but
 * with class/appearance/names packed as EIR data. Calling this
 * switches the socket to the extended info-changed event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed-size reply buffer; the EIR data produced by
	 * append_eir_data_to_buf() must fit after the header.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1059 
/* Emit MGMT_EV_EXT_INFO_CHANGED (with a freshly built EIR blob) to every
 * mgmt socket that opted in via HCI_MGMT_EXT_INFO_EVENTS, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1075 
/* Reply to @opcode on @sk with a command-complete carrying the current
 * settings bitmask - the common success response for Set_* commands.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1083 
1084 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1085 {
1086 	BT_DBG("%s status 0x%02x", hdev->name, status);
1087 
1088 	if (hci_conn_count(hdev) == 0) {
1089 		cancel_delayed_work(&hdev->power_off);
1090 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1091 	}
1092 }
1093 
1094 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1095 {
1096 	struct mgmt_ev_advertising_added ev;
1097 
1098 	ev.instance = instance;
1099 
1100 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1101 }
1102 
1103 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1104 			      u8 instance)
1105 {
1106 	struct mgmt_ev_advertising_removed ev;
1107 
1108 	ev.instance = instance;
1109 
1110 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1111 }
1112 
1113 static void cancel_adv_timeout(struct hci_dev *hdev)
1114 {
1115 	if (hdev->adv_instance_timeout) {
1116 		hdev->adv_instance_timeout = 0;
1117 		cancel_delayed_work(&hdev->adv_instance_expire);
1118 	}
1119 }
1120 
/* Build and run one HCI request that quiesces the controller before power
 * off: disable page/inquiry scan, tear down advertising, stop discovery and
 * abort every active connection. Returns the hci_req_run() result; -ENODATA
 * means nothing needed to be sent.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances before disabling advertising */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	/* Only mark discovery as stopping once the request is in flight */
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1154 
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off. The
 * actual transition happens asynchronously on req_workqueue; a pending
 * command entry tracks the requester until the transition completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1209 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to all
 * sockets subscribed to setting events, except @skip (usually the
 * originator, which gets the same data in its command reply).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1217 
/* Public wrapper: broadcast a settings-changed event to every subscriber */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1222 
/* Shared context for mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (held ref) */
	struct hci_dev *hdev;	/* controller the commands belong to */
	u8 mgmt_status;		/* status to report, where applicable */
};
1228 
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings and free it. The first socket seen is stashed (with a
 * held reference) in the cmd_lookup so the caller can skip it when
 * broadcasting the subsequent new_settings event.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference dropped by the caller via sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1244 
/* mgmt_pending_foreach() callback: fail a pending command with the status
 * pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1252 
1253 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1254 {
1255 	if (cmd->cmd_complete) {
1256 		u8 *status = data;
1257 
1258 		cmd->cmd_complete(cmd, *status);
1259 		mgmt_pending_remove(cmd);
1260 
1261 		return;
1262 	}
1263 
1264 	cmd_status_rsp(cmd, data);
1265 }
1266 
/* cmd_complete handler that echoes back the full original command
 * parameters as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1272 
/* cmd_complete handler for commands whose parameters start with a
 * mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1278 
1279 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1280 {
1281 	if (!lmp_bredr_capable(hdev))
1282 		return MGMT_STATUS_NOT_SUPPORTED;
1283 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1284 		return MGMT_STATUS_REJECTED;
1285 	else
1286 		return MGMT_STATUS_SUCCESS;
1287 }
1288 
1289 static u8 mgmt_le_support(struct hci_dev *hdev)
1290 {
1291 	if (!lmp_le_capable(hdev))
1292 		return MGMT_STATUS_NOT_SUPPORTED;
1293 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1294 		return MGMT_STATUS_REJECTED;
1295 	else
1296 		return MGMT_STATUS_SUCCESS;
1297 }
1298 
/* Called when the HCI side of a Set_Discoverable transition finishes.
 * On failure, report the error and drop the limited-discoverable flag;
 * on success, arm the discoverable timeout (if any), answer the pending
 * command and broadcast the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timer that turns discoverable back off, if requested */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1333 
/* Handler for MGMT_OP_SET_DISCOVERABLE. val is 0x00 (off), 0x01 (general)
 * or 0x02 (limited, which requires a timeout). Powered-off controllers only
 * get their flags updated; powered ones queue asynchronous work, with the
 * timeout armed later in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable; reject otherwise */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1457 
/* Called when the HCI side of a Set_Connectable transition finishes:
 * answer the pending command (error or current settings) and, on success,
 * broadcast the new settings to the other subscribers.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1485 
/* Flag-only connectable update used while the controller is powered off
 * (no HCI traffic possible). Clearing connectable also clears discoverable.
 * Replies to the requester and, if anything changed, refreshes scan state
 * and broadcasts new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1514 
/* Handler for MGMT_OP_SET_CONNECTABLE. While powered off only the flags
 * are updated; otherwise the flags are changed here and the actual scan
 * reconfiguration is deferred to the connectable_update work item, tracked
 * by a pending command.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverability */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1571 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag. Purely a
 * host-side setting - no HCI command needed - but in limited privacy mode
 * a change may require re-running the discoverable update to refresh the
 * advertising address.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tell us whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1614 
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link-level
 * authentication. Powered off: just toggle the flag. Powered on: send
 * HCI_OP_WRITE_AUTH_ENABLE and track the requester via a pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1683 
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * Disabling SSP also drops High Speed (HS depends on SSP). Powered off:
 * flag changes only. Powered on: issue HCI_OP_WRITE_SSP_MODE (and turn off
 * SSP debug-key mode first when disabling).
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also takes HS down with it; report
			 * "changed" if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disable debug keys before turning SSP off (cp->val is 0x00 here) */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1764 
/* Handler for MGMT_OP_SET_HS: toggle the High Speed (AMP) flag. Requires
 * SSP to be enabled; disabling HS is refused while powered on. A purely
 * host-side flag - no HCI command is issued.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight SSP change could invalidate the HS precondition */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1821 
/* HCI request callback for set_le(): answer all pending SET_LE commands
 * (error or current settings), broadcast new settings, and - when LE ended
 * up enabled - refresh default advertising/scan-response data and the
 * background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp stashes (and holds) the first responder's socket */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1868 
/* Handler for MGMT_OP_SET_LE: enable/disable LE host support. LE-only
 * configurations may not switch LE off. If powered and the host LE state
 * actually changes, HCI_OP_WRITE_LE_HOST_SUPPORTED is sent (preceded by
 * advertising teardown when disabling); otherwise only flags are updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE invalidates all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* No HCI traffic needed: powered off, or host LE state unchanged */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning host LE support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1975 
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 *
 * Returns true if any such command is pending. Caller must hold hdev lock.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
1998 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; UUIDs sharing these first 12 bytes can be
 * shortened to 16 or 32 bits (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2003 
2004 static u8 get_uuid_size(const u8 *uuid)
2005 {
2006 	u32 val;
2007 
2008 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2009 		return 128;
2010 
2011 	val = get_unaligned_le32(&uuid[12]);
2012 	if (val > 0xffff)
2013 		return 32;
2014 
2015 	return 16;
2016 }
2017 
/* Shared completion path for the class/EIR-affecting commands (Add_UUID,
 * Remove_UUID, Set_Device_Class): reply to the pending @mgmt_op with the
 * mapped status and the current class of device, then drop the command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2036 
/* HCI request callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2043 
/* Handler for MGMT_OP_ADD_UUID: record a service UUID and refresh the
 * class of device and EIR data to reflect it. If no HCI traffic results
 * (-ENODATA), reply immediately; otherwise a pending command is answered
 * from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Serialize against other class/EIR-affecting commands */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing to send; the UUID is stored, reply right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2101 
2102 static bool enable_service_cache(struct hci_dev *hdev)
2103 {
2104 	if (!hdev_is_powered(hdev))
2105 		return false;
2106 
2107 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2108 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2109 				   CACHE_TIMEOUT);
2110 		return true;
2111 	}
2112 
2113 	return false;
2114 }
2115 
/* HCI request callback for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2122 
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID, or - when the
 * all-zero wildcard UUID is given - clear them all (possibly deferring the
 * HCI update via the service cache). Class of device and EIR are refreshed
 * afterwards; -ENODATA from the request means an immediate reply.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Serialize against other class/EIR-affecting commands */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard: drop every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed, the HCI update is
		 * deferred to the cache timeout - reply now.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Nothing to send; reply with the current class right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2201 
/* HCI request completion callback for the Set Device Class command;
 * forwards the final HCI status to the generic class/EIR completion
 * handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2208 
/* Handle the MGMT Set Device Class command: store the new major/minor
 * class and, if powered, push the updated Class of Device (and possibly
 * EIR) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off only the stored values change; the controller
	 * is updated on the next power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while cancelling: the service-cache work
		 * presumably takes hci_dev_lock itself, so waiting for it
		 * with the lock held would deadlock — TODO confirm against
		 * the work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI commands were needed; complete now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2279 
/* Handle the MGMT Load Link Keys command: replace the device's stored
 * BR/EDR link keys with the supplied list and update the debug-keys
 * policy. All parameters are validated before any state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the full payload still fits
	 * in a 16-bit length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match key_count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the existing keys so a bad
	 * list leaves the stored keys untouched.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners if the debug-keys setting actually flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2360 
2361 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2362 			   u8 addr_type, struct sock *skip_sk)
2363 {
2364 	struct mgmt_ev_device_unpaired ev;
2365 
2366 	bacpy(&ev.addr.bdaddr, bdaddr);
2367 	ev.addr.type = addr_type;
2368 
2369 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2370 			  skip_sk);
2371 }
2372 
/* Handle the MGMT Unpair Device command: remove all stored keys for the
 * given address (link key for BR/EDR; SMP LTK/IRK for LE) and optionally
 * terminate an existing connection to the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key means the device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: connection parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Disconnect is async: the reply is sent from the pending command
	 * once the link has actually gone down.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2500 
/* Handle the MGMT Disconnect command: terminate an existing BR/EDR or LE
 * connection to the given address. Completion is asynchronous via the
 * pending command once the disconnect actually happens.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED states do not represent an established link */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2566 
2567 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2568 {
2569 	switch (link_type) {
2570 	case LE_LINK:
2571 		switch (addr_type) {
2572 		case ADDR_LE_DEV_PUBLIC:
2573 			return BDADDR_LE_PUBLIC;
2574 
2575 		default:
2576 			/* Fallback to LE Random address type */
2577 			return BDADDR_LE_RANDOM;
2578 		}
2579 
2580 	default:
2581 		/* Fallback to BR/EDR type */
2582 		return BDADDR_BREDR;
2583 	}
2584 }
2585 
2586 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2587 			   u16 data_len)
2588 {
2589 	struct mgmt_rp_get_connections *rp;
2590 	struct hci_conn *c;
2591 	size_t rp_len;
2592 	int err;
2593 	u16 i;
2594 
2595 	BT_DBG("");
2596 
2597 	hci_dev_lock(hdev);
2598 
2599 	if (!hdev_is_powered(hdev)) {
2600 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2601 				      MGMT_STATUS_NOT_POWERED);
2602 		goto unlock;
2603 	}
2604 
2605 	i = 0;
2606 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2607 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2608 			i++;
2609 	}
2610 
2611 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2612 	rp = kmalloc(rp_len, GFP_KERNEL);
2613 	if (!rp) {
2614 		err = -ENOMEM;
2615 		goto unlock;
2616 	}
2617 
2618 	i = 0;
2619 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2620 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2621 			continue;
2622 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2623 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2624 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2625 			continue;
2626 		i++;
2627 	}
2628 
2629 	rp->conn_count = cpu_to_le16(i);
2630 
2631 	/* Recalculate length in case of filtered SCO connections, etc */
2632 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2633 
2634 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2635 				rp_len);
2636 
2637 	kfree(rp);
2638 
2639 unlock:
2640 	hci_dev_unlock(hdev);
2641 	return err;
2642 }
2643 
/* Send an HCI PIN Code Negative Reply for the given address, tracking it
 * with a pending PIN_CODE_NEG_REPLY mgmt command. Caller must hold the
 * hdev lock (called from pin_code_reply/pin_code_neg_reply paths).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command takes only the bdaddr, not the full mgmt struct */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2664 
/* Handle the MGMT PIN Code Reply command: forward the user-supplied PIN
 * to the controller for the connection being authenticated. A PIN that is
 * too short for a high-security request is converted into a negative
 * reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing is BR/EDR only, so look up the ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything
	 * shorter by sending a negative reply on the user's behalf.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2726 
/* Handle the MGMT Set IO Capability command: store the IO capability used
 * for subsequent pairing attempts. Purely local state; replies
 * immediately.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability value */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2750 
2751 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2752 {
2753 	struct hci_dev *hdev = conn->hdev;
2754 	struct mgmt_pending_cmd *cmd;
2755 
2756 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2757 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2758 			continue;
2759 
2760 		if (cmd->user_data != conn)
2761 			continue;
2762 
2763 		return cmd;
2764 	}
2765 
2766 	return NULL;
2767 }
2768 
/* Finish a Pair Device command: send the reply with the given status,
 * detach the pairing callbacks from the connection and release the
 * references taken when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drops the usage count taken by hci_connect_*() in pair_device() */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2797 
2798 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2799 {
2800 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2801 	struct mgmt_pending_cmd *cmd;
2802 
2803 	cmd = find_pairing(conn);
2804 	if (cmd) {
2805 		cmd->cmd_complete(cmd, status);
2806 		mgmt_pending_remove(cmd);
2807 	}
2808 }
2809 
/* Connection callback used for BR/EDR pairing: completes the pending
 * Pair Device command with the (HCI-status-mapped) result.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2825 
/* Connection callback used for LE pairing. Unlike the BR/EDR variant it
 * only acts on failure: LE success is reported via mgmt_smp_complete()
 * once SMP has actually finished, since merely connecting proves nothing.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2844 
2845 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2846 		       u16 len)
2847 {
2848 	struct mgmt_cp_pair_device *cp = data;
2849 	struct mgmt_rp_pair_device rp;
2850 	struct mgmt_pending_cmd *cmd;
2851 	u8 sec_level, auth_type;
2852 	struct hci_conn *conn;
2853 	int err;
2854 
2855 	BT_DBG("");
2856 
2857 	memset(&rp, 0, sizeof(rp));
2858 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2859 	rp.addr.type = cp->addr.type;
2860 
2861 	if (!bdaddr_type_is_valid(cp->addr.type))
2862 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2863 					 MGMT_STATUS_INVALID_PARAMS,
2864 					 &rp, sizeof(rp));
2865 
2866 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2867 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2868 					 MGMT_STATUS_INVALID_PARAMS,
2869 					 &rp, sizeof(rp));
2870 
2871 	hci_dev_lock(hdev);
2872 
2873 	if (!hdev_is_powered(hdev)) {
2874 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2875 					MGMT_STATUS_NOT_POWERED, &rp,
2876 					sizeof(rp));
2877 		goto unlock;
2878 	}
2879 
2880 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2881 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2882 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2883 					sizeof(rp));
2884 		goto unlock;
2885 	}
2886 
2887 	sec_level = BT_SECURITY_MEDIUM;
2888 	auth_type = HCI_AT_DEDICATED_BONDING;
2889 
2890 	if (cp->addr.type == BDADDR_BREDR) {
2891 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2892 				       auth_type);
2893 	} else {
2894 		u8 addr_type = le_addr_type(cp->addr.type);
2895 		struct hci_conn_params *p;
2896 
2897 		/* When pairing a new device, it is expected to remember
2898 		 * this device for future connections. Adding the connection
2899 		 * parameter information ahead of time allows tracking
2900 		 * of the slave preferred values and will speed up any
2901 		 * further connection establishment.
2902 		 *
2903 		 * If connection parameters already exist, then they
2904 		 * will be kept and this function does nothing.
2905 		 */
2906 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2907 
2908 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2909 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2910 
2911 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2912 					   addr_type, sec_level,
2913 					   HCI_LE_CONN_TIMEOUT);
2914 	}
2915 
2916 	if (IS_ERR(conn)) {
2917 		int status;
2918 
2919 		if (PTR_ERR(conn) == -EBUSY)
2920 			status = MGMT_STATUS_BUSY;
2921 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2922 			status = MGMT_STATUS_NOT_SUPPORTED;
2923 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2924 			status = MGMT_STATUS_REJECTED;
2925 		else
2926 			status = MGMT_STATUS_CONNECT_FAILED;
2927 
2928 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 					status, &rp, sizeof(rp));
2930 		goto unlock;
2931 	}
2932 
2933 	if (conn->connect_cfm_cb) {
2934 		hci_conn_drop(conn);
2935 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2936 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2937 		goto unlock;
2938 	}
2939 
2940 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2941 	if (!cmd) {
2942 		err = -ENOMEM;
2943 		hci_conn_drop(conn);
2944 		goto unlock;
2945 	}
2946 
2947 	cmd->cmd_complete = pairing_complete;
2948 
2949 	/* For LE, just connecting isn't a proof that the pairing finished */
2950 	if (cp->addr.type == BDADDR_BREDR) {
2951 		conn->connect_cfm_cb = pairing_complete_cb;
2952 		conn->security_cfm_cb = pairing_complete_cb;
2953 		conn->disconn_cfm_cb = pairing_complete_cb;
2954 	} else {
2955 		conn->connect_cfm_cb = le_pairing_complete_cb;
2956 		conn->security_cfm_cb = le_pairing_complete_cb;
2957 		conn->disconn_cfm_cb = le_pairing_complete_cb;
2958 	}
2959 
2960 	conn->io_capability = cp->io_cap;
2961 	cmd->user_data = hci_conn_get(conn);
2962 
2963 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2964 	    hci_conn_security(conn, sec_level, auth_type, true)) {
2965 		cmd->cmd_complete(cmd, 0);
2966 		mgmt_pending_remove(cmd);
2967 	}
2968 
2969 	err = 0;
2970 
2971 unlock:
2972 	hci_dev_unlock(hdev);
2973 	return err;
2974 }
2975 
/* Handle the MGMT Cancel Pair Device command: abort the pending Pair
 * Device command for the given address, completing it with
 * MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Completes the Pair Device command and releases its conn refs */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3018 
/* Common handler for all user pairing responses (PIN negative reply,
 * user confirm/passkey positive and negative replies). LE responses are
 * routed through SMP; BR/EDR responses are forwarded as the given HCI
 * command tracked by a pending mgmt command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP, not to the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		/* Passkey reply carries the passkey alongside the bdaddr */
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3089 
/* MGMT PIN Code Negative Reply: thin wrapper around user_pairing_resp() */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3101 
/* MGMT User Confirm Reply: validate the fixed-size payload and forward
 * to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3117 
/* MGMT User Confirm Negative Reply: thin wrapper around
 * user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3129 
/* MGMT User Passkey Reply: thin wrapper around user_pairing_resp(),
 * passing along the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3141 
/* MGMT User Passkey Negative Reply: thin wrapper around
 * user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3153 
/* Expire the current advertising instance if it uses any of the given
 * flags (e.g. local name or appearance that just changed), and schedule
 * the next instance so the advertised data gets refreshed.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	/* Fire-and-forget: no completion callback needed */
	hci_req_run(&req, NULL);
}
3182 
/* HCI request completion callback for Set Local Name: reply to the
 * pending mgmt command and, on success while advertising, expire any
 * advertising instance that embeds the local name so it gets refreshed.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3214 
/* Handle the MGMT Set Local Name command: store the new complete and
 * short names, and when powered, push the name to the controller
 * (BR/EDR name/EIR and, if advertising, LE scan response data).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off, just store the name and notify listeners;
	 * the controller picks it up on the next power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3284 
3285 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3286 			  u16 len)
3287 {
3288 	struct mgmt_cp_set_appearance *cp = data;
3289 	u16 apperance;
3290 	int err;
3291 
3292 	BT_DBG("");
3293 
3294 	if (!lmp_le_capable(hdev))
3295 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3296 				       MGMT_STATUS_NOT_SUPPORTED);
3297 
3298 	apperance = le16_to_cpu(cp->appearance);
3299 
3300 	hci_dev_lock(hdev);
3301 
3302 	if (hdev->appearance != apperance) {
3303 		hdev->appearance = apperance;
3304 
3305 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3306 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3307 
3308 		ext_info_changed(hdev, sk);
3309 	}
3310 
3311 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3312 				0);
3313 
3314 	hci_dev_unlock(hdev);
3315 
3316 	return err;
3317 }
3318 
/* Handler for MGMT_OP_GET_PHY_CONFIGURATION.
 *
 * Replies with the supported, currently selected and configurable PHY
 * bitmasks. Note: the response struct name "confguration" (sic) comes
 * from the UAPI header and cannot be corrected here.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3339 
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets except @skip (typically the
 * socket that triggered the change, which gets a command response
 * instead).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3351 
/* Completion handler for the HCI LE Set Default PHY request issued by
 * set_phy_configuration(). Answers the pending command and, on success,
 * broadcasts the configuration change to other mgmt sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Skip the requester; it got the command complete above */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3382 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Validates the requested PHY selection against the supported and
 * configurable masks, translates BR/EDR PHY bits into the ACL packet
 * type mask (host-side only) and, when the LE PHY selection changed,
 * sends HCI LE Set Default PHY to the controller. Note: the command
 * struct name "confguration" (sic) comes from the UAPI header.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	BT_DBG("sock %p %s", sk, hdev->name);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any PHY bit the controller does not support at all */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs are always on; they must all stay selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: answer immediately without HCI traffic */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map BR/EDR PHY bits onto the ACL packet type mask. Basic-rate
	 * slot bits enable the corresponding packet types, while the EDR
	 * bits are inverted: setting the HCI_2DHx/HCI_3DHx bit in
	 * pkt_type means "do NOT use" that EDR packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed there is nothing to tell the
	 * controller; report the change (if any) and complete.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller the host has no preference
	 * for TX (0x01) and/or RX (0x02) PHYs.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3537 
/* Completion handler for the Read Local OOB Data HCI request.
 *
 * Parses either the legacy (P-192 only) or the extended (P-192 and
 * P-256) controller response — distinguished by @opcode — into a
 * single mgmt response. For the legacy case the reply is shortened so
 * the 256-bit fields are not included.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy response carries no P-256 data; trim the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3596 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Requires a powered, SSP-capable controller. Issues the extended OOB
 * read when BR/EDR Secure Connections is enabled, otherwise the legacy
 * one; the result is delivered via read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB read may be in flight at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Extended variant also returns the P-256 hash/randomizer */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3647 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Accepts two command sizes: the short form carrying only P-192
 * hash/randomizer (BR/EDR only), and the extended form carrying both
 * P-192 and P-256 values. All-zero key material is treated as "no data"
 * for the respective curve, and for LE addresses the P-192 values must
 * be zero since legacy SMP OOB is not implemented.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Short form: P-192 data only, valid for BR/EDR addresses */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 data */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3755 
3756 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3757 				  void *data, u16 len)
3758 {
3759 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3760 	u8 status;
3761 	int err;
3762 
3763 	BT_DBG("%s", hdev->name);
3764 
3765 	if (cp->addr.type != BDADDR_BREDR)
3766 		return mgmt_cmd_complete(sk, hdev->id,
3767 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3768 					 MGMT_STATUS_INVALID_PARAMS,
3769 					 &cp->addr, sizeof(cp->addr));
3770 
3771 	hci_dev_lock(hdev);
3772 
3773 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3774 		hci_remote_oob_data_clear(hdev);
3775 		status = MGMT_STATUS_SUCCESS;
3776 		goto done;
3777 	}
3778 
3779 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3780 	if (err < 0)
3781 		status = MGMT_STATUS_INVALID_PARAMS;
3782 	else
3783 		status = MGMT_STATUS_SUCCESS;
3784 
3785 done:
3786 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3787 				status, &cp->addr, sizeof(cp->addr));
3788 
3789 	hci_dev_unlock(hdev);
3790 	return err;
3791 }
3792 
3793 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3794 {
3795 	struct mgmt_pending_cmd *cmd;
3796 
3797 	BT_DBG("status %d", status);
3798 
3799 	hci_dev_lock(hdev);
3800 
3801 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3802 	if (!cmd)
3803 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3804 
3805 	if (!cmd)
3806 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3807 
3808 	if (cmd) {
3809 		cmd->cmd_complete(cmd, mgmt_status(status));
3810 		mgmt_pending_remove(cmd);
3811 	}
3812 
3813 	hci_dev_unlock(hdev);
3814 }
3815 
3816 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3817 				    uint8_t *mgmt_status)
3818 {
3819 	switch (type) {
3820 	case DISCOV_TYPE_LE:
3821 		*mgmt_status = mgmt_le_support(hdev);
3822 		if (*mgmt_status)
3823 			return false;
3824 		break;
3825 	case DISCOV_TYPE_INTERLEAVED:
3826 		*mgmt_status = mgmt_le_support(hdev);
3827 		if (*mgmt_status)
3828 			return false;
3829 		/* Intentional fall-through */
3830 	case DISCOV_TYPE_BREDR:
3831 		*mgmt_status = mgmt_bredr_support(hdev);
3832 		if (*mgmt_status)
3833 			return false;
3834 		break;
3835 	default:
3836 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3837 		return false;
3838 	}
3839 
3840 	return true;
3841 }
3842 
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which).
 *
 * Validates power state, discovery state and the requested type, then
 * hands off to the discov_update work; the pending command is answered
 * from mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery or periodic inquiry is already running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Actual scanning is started asynchronously by the work item */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3903 
/* Handler for MGMT_OP_START_DISCOVERY; thin wrapper around the shared
 * start_discovery_internal() implementation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3910 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY; thin wrapper around the
 * shared start_discovery_internal() implementation.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3918 
/* cmd_complete callback for Start Service Discovery: echo back only the
 * first parameter byte (the discovery type), not the full UUID list.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3925 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Like start_discovery_internal() but additionally installs a result
 * filter (RSSI threshold plus an optional list of service UUIDs) that
 * restricts which found devices get reported.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count for which sizeof(*cp) + count * 16 fits in u16;
	 * this bound also prevents overflow in expected_len below.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery or periodic inquiry is already running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Command length must match the declared UUID count exactly */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Actual scanning is started asynchronously by the work item */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4026 
4027 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4028 {
4029 	struct mgmt_pending_cmd *cmd;
4030 
4031 	BT_DBG("status %d", status);
4032 
4033 	hci_dev_lock(hdev);
4034 
4035 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4036 	if (cmd) {
4037 		cmd->cmd_complete(cmd, mgmt_status(status));
4038 		mgmt_pending_remove(cmd);
4039 	}
4040 
4041 	hci_dev_unlock(hdev);
4042 }
4043 
/* Handler for MGMT_OP_STOP_DISCOVERY.
 *
 * The requested type must match the type of the discovery that is
 * currently active. The actual stop happens asynchronously via the
 * discov_update work; the pending command is answered from
 * mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Only the discovery type that was started can be stopped */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4085 
4086 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4087 			u16 len)
4088 {
4089 	struct mgmt_cp_confirm_name *cp = data;
4090 	struct inquiry_entry *e;
4091 	int err;
4092 
4093 	BT_DBG("%s", hdev->name);
4094 
4095 	hci_dev_lock(hdev);
4096 
4097 	if (!hci_discovery_active(hdev)) {
4098 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4099 					MGMT_STATUS_FAILED, &cp->addr,
4100 					sizeof(cp->addr));
4101 		goto failed;
4102 	}
4103 
4104 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4105 	if (!e) {
4106 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4107 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4108 					sizeof(cp->addr));
4109 		goto failed;
4110 	}
4111 
4112 	if (cp->name_known) {
4113 		e->name_state = NAME_KNOWN;
4114 		list_del(&e->list);
4115 	} else {
4116 		e->name_state = NAME_NEEDED;
4117 		hci_inquiry_cache_update_resolve(hdev, e);
4118 	}
4119 
4120 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4121 				&cp->addr, sizeof(cp->addr));
4122 
4123 failed:
4124 	hci_dev_unlock(hdev);
4125 	return err;
4126 }
4127 
4128 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4129 			u16 len)
4130 {
4131 	struct mgmt_cp_block_device *cp = data;
4132 	u8 status;
4133 	int err;
4134 
4135 	BT_DBG("%s", hdev->name);
4136 
4137 	if (!bdaddr_type_is_valid(cp->addr.type))
4138 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4139 					 MGMT_STATUS_INVALID_PARAMS,
4140 					 &cp->addr, sizeof(cp->addr));
4141 
4142 	hci_dev_lock(hdev);
4143 
4144 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4145 				  cp->addr.type);
4146 	if (err < 0) {
4147 		status = MGMT_STATUS_FAILED;
4148 		goto done;
4149 	}
4150 
4151 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4152 		   sk);
4153 	status = MGMT_STATUS_SUCCESS;
4154 
4155 done:
4156 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4157 				&cp->addr, sizeof(cp->addr));
4158 
4159 	hci_dev_unlock(hdev);
4160 
4161 	return err;
4162 }
4163 
4164 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4165 			  u16 len)
4166 {
4167 	struct mgmt_cp_unblock_device *cp = data;
4168 	u8 status;
4169 	int err;
4170 
4171 	BT_DBG("%s", hdev->name);
4172 
4173 	if (!bdaddr_type_is_valid(cp->addr.type))
4174 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4175 					 MGMT_STATUS_INVALID_PARAMS,
4176 					 &cp->addr, sizeof(cp->addr));
4177 
4178 	hci_dev_lock(hdev);
4179 
4180 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4181 				  cp->addr.type);
4182 	if (err < 0) {
4183 		status = MGMT_STATUS_INVALID_PARAMS;
4184 		goto done;
4185 	}
4186 
4187 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4188 		   sk);
4189 	status = MGMT_STATUS_SUCCESS;
4190 
4191 done:
4192 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4193 				&cp->addr, sizeof(cp->addr));
4194 
4195 	hci_dev_unlock(hdev);
4196 
4197 	return err;
4198 }
4199 
/* Handler for MGMT_OP_SET_DEVICE_ID.
 *
 * Stores the Device ID (DI) record values and refreshes the EIR data so
 * the new Device ID is advertised over BR/EDR. Source 0x0000 means
 * "disabled"; 0x0001 is Bluetooth SIG, 0x0002 is USB-IF.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID into the EIR data */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4234 
/* Request-complete callback used when re-enabling instance advertising;
 * only logs the status, no further action is needed.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4240 
/* Completion handler for the HCI request issued by set_advertising().
 *
 * Syncs the HCI_ADVERTISING flag with the controller state, answers all
 * pending Set Advertising commands and emits New Settings. If the
 * setting was just turned off while advertising instances exist,
 * instance advertising is re-scheduled.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual controller advertising state into the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance; fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4303 
/* Handler for MGMT_OP_SET_ADVERTISING.
 *
 * Value 0x00 disables advertising, 0x01 enables it, and 0x02 enables
 * connectable advertising. When no HCI communication is required (or
 * possible), only the flags are toggled and the response is sent
 * directly; otherwise an HCI request is issued and completion is
 * handled by set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4411 
4412 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4413 			      void *data, u16 len)
4414 {
4415 	struct mgmt_cp_set_static_address *cp = data;
4416 	int err;
4417 
4418 	BT_DBG("%s", hdev->name);
4419 
4420 	if (!lmp_le_capable(hdev))
4421 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4422 				       MGMT_STATUS_NOT_SUPPORTED);
4423 
4424 	if (hdev_is_powered(hdev))
4425 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4426 				       MGMT_STATUS_REJECTED);
4427 
4428 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4429 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4430 			return mgmt_cmd_status(sk, hdev->id,
4431 					       MGMT_OP_SET_STATIC_ADDRESS,
4432 					       MGMT_STATUS_INVALID_PARAMS);
4433 
4434 		/* Two most significant bits shall be set */
4435 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4436 			return mgmt_cmd_status(sk, hdev->id,
4437 					       MGMT_OP_SET_STATIC_ADDRESS,
4438 					       MGMT_STATUS_INVALID_PARAMS);
4439 	}
4440 
4441 	hci_dev_lock(hdev);
4442 
4443 	bacpy(&hdev->static_addr, &cp->bdaddr);
4444 
4445 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4446 	if (err < 0)
4447 		goto unlock;
4448 
4449 	err = new_settings(hdev, sk);
4450 
4451 unlock:
4452 	hci_dev_unlock(hdev);
4453 	return err;
4454 }
4455 
4456 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4457 			   void *data, u16 len)
4458 {
4459 	struct mgmt_cp_set_scan_params *cp = data;
4460 	__u16 interval, window;
4461 	int err;
4462 
4463 	BT_DBG("%s", hdev->name);
4464 
4465 	if (!lmp_le_capable(hdev))
4466 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4467 				       MGMT_STATUS_NOT_SUPPORTED);
4468 
4469 	interval = __le16_to_cpu(cp->interval);
4470 
4471 	if (interval < 0x0004 || interval > 0x4000)
4472 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4473 				       MGMT_STATUS_INVALID_PARAMS);
4474 
4475 	window = __le16_to_cpu(cp->window);
4476 
4477 	if (window < 0x0004 || window > 0x4000)
4478 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4479 				       MGMT_STATUS_INVALID_PARAMS);
4480 
4481 	if (window > interval)
4482 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4483 				       MGMT_STATUS_INVALID_PARAMS);
4484 
4485 	hci_dev_lock(hdev);
4486 
4487 	hdev->le_scan_interval = interval;
4488 	hdev->le_scan_window = window;
4489 
4490 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4491 				NULL, 0);
4492 
4493 	/* If background scan is running, restart it so new parameters are
4494 	 * loaded.
4495 	 */
4496 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4497 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4498 		struct hci_request req;
4499 
4500 		hci_req_init(&req, hdev);
4501 
4502 		hci_req_add_le_scan_disable(&req);
4503 		hci_req_add_le_passive_scan(&req);
4504 
4505 		hci_req_run(&req, NULL);
4506 	}
4507 
4508 	hci_dev_unlock(hdev);
4509 
4510 	return err;
4511 }
4512 
4513 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4514 				      u16 opcode)
4515 {
4516 	struct mgmt_pending_cmd *cmd;
4517 
4518 	BT_DBG("status 0x%02x", status);
4519 
4520 	hci_dev_lock(hdev);
4521 
4522 	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4523 	if (!cmd)
4524 		goto unlock;
4525 
4526 	if (status) {
4527 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4528 			        mgmt_status(status));
4529 	} else {
4530 		struct mgmt_mode *cp = cmd->param;
4531 
4532 		if (cp->val)
4533 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4534 		else
4535 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4536 
4537 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4538 		new_settings(hdev, cmd->sk);
4539 	}
4540 
4541 	mgmt_pending_remove(cmd);
4542 
4543 unlock:
4544 	hci_dev_unlock(hdev);
4545 }
4546 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Toggles use of a faster page scan. Requires BR/EDR to be enabled and
 * a controller of at least version 1.2. While powered on the page scan
 * parameters are written via an HCI request and the command is finished
 * from fast_connectable_complete(); while powered off only the flag is
 * toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested; just reply with the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; the controller is
	 * programmed during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4611 
4612 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4613 {
4614 	struct mgmt_pending_cmd *cmd;
4615 
4616 	BT_DBG("status 0x%02x", status);
4617 
4618 	hci_dev_lock(hdev);
4619 
4620 	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4621 	if (!cmd)
4622 		goto unlock;
4623 
4624 	if (status) {
4625 		u8 mgmt_err = mgmt_status(status);
4626 
4627 		/* We need to restore the flag if related HCI commands
4628 		 * failed.
4629 		 */
4630 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4631 
4632 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4633 	} else {
4634 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4635 		new_settings(hdev, cmd->sk);
4636 	}
4637 
4638 	mgmt_pending_remove(cmd);
4639 
4640 unlock:
4641 	hci_dev_unlock(hdev);
4642 }
4643 
/* Handler for MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR support on a dual-mode controller. While
 * powered on only enabling is permitted. The HCI_BREDR_ENABLED flag is
 * set optimistically before the HCI request runs and is rolled back in
 * set_bredr_complete() on failure.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested; just reply with the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every setting that only
		 * makes sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4755 
/* Request-completion callback for MGMT_OP_SET_SECURE_CONN.
 *
 * On success applies the mode stored in the pending command's parameter
 * (0x00 off, 0x01 SC enabled, 0x02 SC-only) to the device flags and
 * emits the settings response plus a New Settings event. Values outside
 * 0x00-0x02 were rejected in set_secure_conn(), so the switch needs no
 * default case.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4800 
4801 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4802 			   void *data, u16 len)
4803 {
4804 	struct mgmt_mode *cp = data;
4805 	struct mgmt_pending_cmd *cmd;
4806 	struct hci_request req;
4807 	u8 val;
4808 	int err;
4809 
4810 	BT_DBG("request for %s", hdev->name);
4811 
4812 	if (!lmp_sc_capable(hdev) &&
4813 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4814 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4815 				       MGMT_STATUS_NOT_SUPPORTED);
4816 
4817 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4818 	    lmp_sc_capable(hdev) &&
4819 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4820 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4821 				       MGMT_STATUS_REJECTED);
4822 
4823 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4824 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4825 				  MGMT_STATUS_INVALID_PARAMS);
4826 
4827 	hci_dev_lock(hdev);
4828 
4829 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4830 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4831 		bool changed;
4832 
4833 		if (cp->val) {
4834 			changed = !hci_dev_test_and_set_flag(hdev,
4835 							     HCI_SC_ENABLED);
4836 			if (cp->val == 0x02)
4837 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
4838 			else
4839 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4840 		} else {
4841 			changed = hci_dev_test_and_clear_flag(hdev,
4842 							      HCI_SC_ENABLED);
4843 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4844 		}
4845 
4846 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4847 		if (err < 0)
4848 			goto failed;
4849 
4850 		if (changed)
4851 			err = new_settings(hdev, sk);
4852 
4853 		goto failed;
4854 	}
4855 
4856 	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4857 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4858 				      MGMT_STATUS_BUSY);
4859 		goto failed;
4860 	}
4861 
4862 	val = !!cp->val;
4863 
4864 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4865 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4866 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4867 		goto failed;
4868 	}
4869 
4870 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4871 	if (!cmd) {
4872 		err = -ENOMEM;
4873 		goto failed;
4874 	}
4875 
4876 	hci_req_init(&req, hdev);
4877 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4878 	err = hci_req_run(&req, sc_enable_complete);
4879 	if (err < 0) {
4880 		mgmt_pending_remove(cmd);
4881 		goto failed;
4882 	}
4883 
4884 failed:
4885 	hci_dev_unlock(hdev);
4886 	return err;
4887 }
4888 
4889 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4890 			  void *data, u16 len)
4891 {
4892 	struct mgmt_mode *cp = data;
4893 	bool changed, use_changed;
4894 	int err;
4895 
4896 	BT_DBG("request for %s", hdev->name);
4897 
4898 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4899 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4900 				       MGMT_STATUS_INVALID_PARAMS);
4901 
4902 	hci_dev_lock(hdev);
4903 
4904 	if (cp->val)
4905 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4906 	else
4907 		changed = hci_dev_test_and_clear_flag(hdev,
4908 						      HCI_KEEP_DEBUG_KEYS);
4909 
4910 	if (cp->val == 0x02)
4911 		use_changed = !hci_dev_test_and_set_flag(hdev,
4912 							 HCI_USE_DEBUG_KEYS);
4913 	else
4914 		use_changed = hci_dev_test_and_clear_flag(hdev,
4915 							  HCI_USE_DEBUG_KEYS);
4916 
4917 	if (hdev_is_powered(hdev) && use_changed &&
4918 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4919 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4920 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4921 			     sizeof(mode), &mode);
4922 	}
4923 
4924 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4925 	if (err < 0)
4926 		goto unlock;
4927 
4928 	if (changed)
4929 		err = new_settings(hdev, sk);
4930 
4931 unlock:
4932 	hci_dev_unlock(hdev);
4933 	return err;
4934 }
4935 
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * cp->privacy: 0x00 disable, 0x01 enable, 0x02 enable limited privacy.
 * Stores the provided IRK and marks the current RPA as expired so a
 * fresh one gets generated. Only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a new RPA based on the new IRK. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Zeroize the stored IRK when privacy is disabled. */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4992 
4993 static bool irk_is_valid(struct mgmt_irk_info *irk)
4994 {
4995 	switch (irk->addr.type) {
4996 	case BDADDR_LE_PUBLIC:
4997 		return true;
4998 
4999 	case BDADDR_LE_RANDOM:
5000 		/* Two most significant bits shall be set */
5001 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5002 			return false;
5003 		return true;
5004 	}
5005 
5006 	return false;
5007 }
5008 
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the whole stored IRK list with the one supplied by user
 * space. The entire list is validated up front so the existing keys
 * are only cleared once the new list is known to be good.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything before touching the stored keys. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5071 
5072 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5073 {
5074 	if (key->master != 0x00 && key->master != 0x01)
5075 		return false;
5076 
5077 	switch (key->addr.type) {
5078 	case BDADDR_LE_PUBLIC:
5079 		return true;
5080 
5081 	case BDADDR_LE_RANDOM:
5082 		/* Two most significant bits shall be set */
5083 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5084 			return false;
5085 		return true;
5086 	}
5087 
5088 	return false;
5089 }
5090 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the whole stored LTK list with the one supplied by user
 * space. The list is validated before the existing keys are cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate everything before touching the stored keys. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		/* NOTE(review): P256 debug keys fall through into the
		 * default case and hit "continue", so they are never
		 * stored and the two assignments below are dead.
		 * Presumably intentional (debug keys use a publicly
		 * known value) - confirm before relying on it.
		 */
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5178 
5179 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5180 {
5181 	struct hci_conn *conn = cmd->user_data;
5182 	struct mgmt_rp_get_conn_info rp;
5183 	int err;
5184 
5185 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5186 
5187 	if (status == MGMT_STATUS_SUCCESS) {
5188 		rp.rssi = conn->rssi;
5189 		rp.tx_power = conn->tx_power;
5190 		rp.max_tx_power = conn->max_tx_power;
5191 	} else {
5192 		rp.rssi = HCI_RSSI_INVALID;
5193 		rp.tx_power = HCI_TX_POWER_INVALID;
5194 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5195 	}
5196 
5197 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5198 				status, &rp, sizeof(rp));
5199 
5200 	hci_conn_drop(conn);
5201 	hci_conn_put(conn);
5202 
5203 	return err;
5204 }
5205 
/* Request-completion callback for the Read RSSI / Read TX Power request
 * queued by get_conn_info(). Recovers the connection handle from the
 * last sent HCI command and completes the matching pending command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was the last one sent; nothing to complete. */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command was registered with the connection as its
	 * user data, so look it up by that.
	 */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5259 
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI and TX power for an existing connection. If the cached
 * values are fresh enough they are returned immediately; otherwise an
 * HCI request refreshes them and the command is completed from
 * conn_info_refresh_complete() via conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the queried address back in every reply, even errors. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight. */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold the connection until the reply is sent; released
		 * in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5380 
5381 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5382 {
5383 	struct hci_conn *conn = cmd->user_data;
5384 	struct mgmt_rp_get_clock_info rp;
5385 	struct hci_dev *hdev;
5386 	int err;
5387 
5388 	memset(&rp, 0, sizeof(rp));
5389 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5390 
5391 	if (status)
5392 		goto complete;
5393 
5394 	hdev = hci_dev_get(cmd->index);
5395 	if (hdev) {
5396 		rp.local_clock = cpu_to_le32(hdev->clock);
5397 		hci_dev_put(hdev);
5398 	}
5399 
5400 	if (conn) {
5401 		rp.piconet_clock = cpu_to_le32(conn->clock);
5402 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5403 	}
5404 
5405 complete:
5406 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5407 				sizeof(rp));
5408 
5409 	if (conn) {
5410 		hci_conn_drop(conn);
5411 		hci_conn_put(conn);
5412 	}
5413 
5414 	return err;
5415 }
5416 
/* HCI request completion callback for Get Clock Info.
 *
 * Runs once the HCI_OP_READ_CLOCK command(s) queued by get_clock_info()
 * have finished. Looks up the matching pending management command and
 * completes it with the translated HCI status.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Fetch the parameters of the last sent HCI_OP_READ_CLOCK */
	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a connection was
	 * read, so the pending command is associated with that connection.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Match on conn, which was stored as cmd->user_data when queueing */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5448 
/* Handle the Get Clock Info management command.
 *
 * Queues HCI_OP_READ_CLOCK for the local clock and, if a peer address
 * was given, a second HCI_OP_READ_CLOCK for that connection's piconet
 * clock. The actual reply is sent from clock_info_cmd_complete() via
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply with the requested address for error replies */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects the piconet clock of an established
	 * ACL connection; BDADDR_ANY requests only the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: which = 0x00 (from memset) selects the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are dropped in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5524 
5525 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5526 {
5527 	struct hci_conn *conn;
5528 
5529 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5530 	if (!conn)
5531 		return false;
5532 
5533 	if (conn->dst_type != type)
5534 		return false;
5535 
5536 	if (conn->state != BT_CONNECTED)
5537 		return false;
5538 
5539 	return true;
5540 }
5541 
/* Set the auto connect policy for an LE device, creating the connection
 * parameters entry if it does not exist yet, and move the entry onto
 * the matching pend_le_conns/pend_le_reports action list.
 *
 * This function requires the caller holds hdev->lock
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Look up existing parameters or create a new default entry */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from the current action list before re-adding below */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5586 
5587 static void device_added(struct sock *sk, struct hci_dev *hdev,
5588 			 bdaddr_t *bdaddr, u8 type, u8 action)
5589 {
5590 	struct mgmt_ev_device_added ev;
5591 
5592 	bacpy(&ev.addr.bdaddr, bdaddr);
5593 	ev.addr.type = type;
5594 	ev.action = action;
5595 
5596 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5597 }
5598 
/* Handle the Add Device management command.
 *
 * For BR/EDR addresses the device is added to the controller whitelist
 * (only the "allow incoming connection" action is supported). For LE
 * addresses the connection parameters are created/updated with an auto
 * connect policy derived from cp->action, and background scanning is
 * refreshed.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Valid actions: 0x00 background scan/report, 0x01 direct
	 * connect (BR/EDR: allow incoming), 0x02 auto-connect always.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Re-evaluate page scan now that the whitelist changed */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	/* Notify other mgmt sockets about the new device entry */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5686 
5687 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5688 			   bdaddr_t *bdaddr, u8 type)
5689 {
5690 	struct mgmt_ev_device_removed ev;
5691 
5692 	bacpy(&ev.addr.bdaddr, bdaddr);
5693 	ev.addr.type = type;
5694 
5695 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5696 }
5697 
/* Handle the Remove Device management command.
 *
 * With a specific address, removes that device from the whitelist
 * (BR/EDR) or deletes its LE connection parameters. With BDADDR_ANY
 * (and type 0), clears the whole whitelist and all non-disabled LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the whitelist: nothing to remove */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate page scan after whitelist change */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires type 0x00 (BR/EDR) */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the whole BR/EDR whitelist */
		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		/* Flush all LE connection parameters except disabled
		 * entries; explicit-connect entries are kept but
		 * downgraded so the pending connect can finish.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5826 
/* Handle the Load Connection Parameters management command.
 *
 * Replaces the stored LE connection parameters with the supplied list.
 * Invalid entries are skipped with an error log rather than failing
 * the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the total command length within u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop stale disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5911 
/* Handle the Set External Configuration management command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag while the controller is powered
 * off. If the change flips the controller between configured and
 * unconfigured, the mgmt index is re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Configuration changes are only allowed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	/* Only controllers flagged as externally configurable qualify */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches HCI_UNCONFIGURED,
	 * re-register the controller under the correct mgmt interface.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5967 
/* Handle the Set Public Address management command.
 *
 * Stores a new public address for a controller that supports the
 * set_bdaddr driver callback. If the address change makes an
 * unconfigured controller configured, power-on is scheduled so the
 * address gets programmed.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If now fully configured, re-register as a configured index
	 * and schedule power-on to program the address.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6019 
/* HCI request completion callback for Read Local OOB Extended Data.
 *
 * Parses either the legacy (P-192 only) or the extended (P-192 +
 * P-256) OOB reply, builds the EIR-formatted response, completes the
 * pending command and broadcasts the updated OOB data to interested
 * sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI failure: reply with empty EIR data */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: P-192 hash/randomizer only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class of dev (5) + hash (18) + rand (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode: omit the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On error, send the reply without any EIR payload */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6130 
6131 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6132 				  struct mgmt_cp_read_local_oob_ext_data *cp)
6133 {
6134 	struct mgmt_pending_cmd *cmd;
6135 	struct hci_request req;
6136 	int err;
6137 
6138 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6139 			       cp, sizeof(*cp));
6140 	if (!cmd)
6141 		return -ENOMEM;
6142 
6143 	hci_req_init(&req, hdev);
6144 
6145 	if (bredr_sc_enabled(hdev))
6146 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6147 	else
6148 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6149 
6150 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6151 	if (err < 0) {
6152 		mgmt_pending_remove(cmd);
6153 		return err;
6154 	}
6155 
6156 	return 0;
6157 }
6158 
/* Handle the Read Local OOB Extended Data management command.
 *
 * cp->type is a bitmask selecting BR/EDR or LE transport. The BR/EDR
 * case with SSP enabled is answered asynchronously via
 * read_local_ssp_oob_req(); all other cases build the EIR reply
 * directly here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: determine status and the worst-case EIR length so
	 * a big enough reply buffer can be allocated.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must be read from the controller; the
			 * reply is sent from the request callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: static random
		 * (addr[6] = 0x01) or public (addr[6] = 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 when advertising (peripheral preferred),
		 * 0x01 otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6314 
6315 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6316 {
6317 	u32 flags = 0;
6318 
6319 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6320 	flags |= MGMT_ADV_FLAG_DISCOV;
6321 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6322 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6323 	flags |= MGMT_ADV_FLAG_APPEARANCE;
6324 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6325 
6326 	/* In extended adv TX_POWER returned from Set Adv Param
6327 	 * will be always valid.
6328 	 */
6329 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6330 	    ext_adv_capable(hdev))
6331 		flags |= MGMT_ADV_FLAG_TX_POWER;
6332 
6333 	if (ext_adv_capable(hdev)) {
6334 		flags |= MGMT_ADV_FLAG_SEC_1M;
6335 
6336 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
6337 			flags |= MGMT_ADV_FLAG_SEC_2M;
6338 
6339 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6340 			flags |= MGMT_ADV_FLAG_SEC_CODED;
6341 	}
6342 
6343 	return flags;
6344 }
6345 
/* Handle the Read Advertising Features management command.
 *
 * Reports the supported advertising flags, data-size limits and the
 * list of currently configured advertising instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per configured advertising instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Copy the instance identifiers while still holding the lock */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6394 
6395 static u8 calculate_name_len(struct hci_dev *hdev)
6396 {
6397 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6398 
6399 	return append_local_name(hdev, buf, 0);
6400 }
6401 
6402 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6403 			   bool is_adv_data)
6404 {
6405 	u8 max_len = HCI_MAX_AD_LENGTH;
6406 
6407 	if (is_adv_data) {
6408 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6409 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
6410 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
6411 			max_len -= 3;
6412 
6413 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6414 			max_len -= 3;
6415 	} else {
6416 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6417 			max_len -= calculate_name_len(hdev);
6418 
6419 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6420 			max_len -= 4;
6421 	}
6422 
6423 	return max_len;
6424 }
6425 
6426 static bool flags_managed(u32 adv_flags)
6427 {
6428 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6429 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6430 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
6431 }
6432 
6433 static bool tx_power_managed(u32 adv_flags)
6434 {
6435 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6436 }
6437 
6438 static bool name_managed(u32 adv_flags)
6439 {
6440 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6441 }
6442 
6443 static bool appearance_managed(u32 adv_flags)
6444 {
6445 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6446 }
6447 
6448 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6449 			      u8 len, bool is_adv_data)
6450 {
6451 	int i, cur_len;
6452 	u8 max_len;
6453 
6454 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6455 
6456 	if (len > max_len)
6457 		return false;
6458 
6459 	/* Make sure that the data is correctly formatted. */
6460 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6461 		cur_len = data[i];
6462 
6463 		if (data[i + 1] == EIR_FLAGS &&
6464 		    (!is_adv_data || flags_managed(adv_flags)))
6465 			return false;
6466 
6467 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6468 			return false;
6469 
6470 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6471 			return false;
6472 
6473 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6474 			return false;
6475 
6476 		if (data[i + 1] == EIR_APPEARANCE &&
6477 		    appearance_managed(adv_flags))
6478 			return false;
6479 
6480 		/* If the current field length would exceed the total data
6481 		 * length, then it's invalid.
6482 		 */
6483 		if (i + cur_len >= len)
6484 			return false;
6485 	}
6486 
6487 	return true;
6488 }
6489 
/* HCI request completion callback for Add Advertising.
 *
 * On success, clears the pending flag of the newly programmed
 * instances. On failure, removes every still-pending instance and
 * notifies mgmt listeners. Finally completes the pending Add
 * Advertising command, if one exists.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer if the failed instance is active */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	/* The request may have been issued without a pending command */
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6541 
/* MGMT_OP_ADD_ADVERTISING command handler.
 *
 * Validates the request, stores the advertising instance via
 * hci_add_adv_instance() and, when appropriate, schedules it for actual
 * advertising.  When no HCI traffic is needed (device not powered,
 * HCI_ADVERTISING set, or nothing to schedule) the command completes
 * immediately; otherwise the reply is sent from
 * add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance numbers are 1..HCI_MAX_ADV_INSTANCES. */
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The payload must exactly hold the declared adv and scan rsp data. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags ^ (phy_flags & -phy_flags) clears the lowest set bit;
	 * a non-zero result means more than one secondary PHY was
	 * requested, which is invalid.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the device is powered. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another command that changes advertising or LE
	 * state is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6684 
6685 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6686 					u16 opcode)
6687 {
6688 	struct mgmt_pending_cmd *cmd;
6689 	struct mgmt_cp_remove_advertising *cp;
6690 	struct mgmt_rp_remove_advertising rp;
6691 
6692 	BT_DBG("status %d", status);
6693 
6694 	hci_dev_lock(hdev);
6695 
6696 	/* A failure status here only means that we failed to disable
6697 	 * advertising. Otherwise, the advertising instance has been removed,
6698 	 * so report success.
6699 	 */
6700 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6701 	if (!cmd)
6702 		goto unlock;
6703 
6704 	cp = cmd->param;
6705 	rp.instance = cp->instance;
6706 
6707 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6708 			  &rp, sizeof(rp));
6709 	mgmt_pending_remove(cmd);
6710 
6711 unlock:
6712 	hci_dev_unlock(hdev);
6713 }
6714 
/* MGMT_OP_REMOVE_ADVERTISING command handler.
 *
 * Removes the requested advertising instance.  cp->instance == 0 is
 * accepted without a lookup -- presumably meaning "all instances";
 * confirm against hci_req_clear_adv_instance().  When no HCI commands
 * need to be sent the command completes immediately; otherwise the
 * reply comes from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another command that changes advertising or LE
	 * state is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	/* Turn advertising off entirely once the last instance is gone. */
	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6787 
6788 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6789 			     void *data, u16 data_len)
6790 {
6791 	struct mgmt_cp_get_adv_size_info *cp = data;
6792 	struct mgmt_rp_get_adv_size_info rp;
6793 	u32 flags, supported_flags;
6794 	int err;
6795 
6796 	BT_DBG("%s", hdev->name);
6797 
6798 	if (!lmp_le_capable(hdev))
6799 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6800 				       MGMT_STATUS_REJECTED);
6801 
6802 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6803 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6804 				       MGMT_STATUS_INVALID_PARAMS);
6805 
6806 	flags = __le32_to_cpu(cp->flags);
6807 
6808 	/* The current implementation only supports a subset of the specified
6809 	 * flags.
6810 	 */
6811 	supported_flags = get_supported_adv_flags(hdev);
6812 	if (flags & ~supported_flags)
6813 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6814 				       MGMT_STATUS_INVALID_PARAMS);
6815 
6816 	rp.instance = cp->instance;
6817 	rp.flags = cp->flags;
6818 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6819 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6820 
6821 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6822 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6823 
6824 	return err;
6825 }
6826 
/* Dispatch table for management commands; the array index corresponds
 * to the command opcode (entry 0x0000 is unused).  Each entry gives
 * the handler, the expected parameter size (the minimum size for
 * entries flagged HCI_MGMT_VAR_LEN) and optional HCI_MGMT_* flags.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
};
6922 
6923 void mgmt_index_added(struct hci_dev *hdev)
6924 {
6925 	struct mgmt_ev_ext_index ev;
6926 
6927 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6928 		return;
6929 
6930 	switch (hdev->dev_type) {
6931 	case HCI_PRIMARY:
6932 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6933 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6934 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6935 			ev.type = 0x01;
6936 		} else {
6937 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6938 					 HCI_MGMT_INDEX_EVENTS);
6939 			ev.type = 0x00;
6940 		}
6941 		break;
6942 	case HCI_AMP:
6943 		ev.type = 0x02;
6944 		break;
6945 	default:
6946 		return;
6947 	}
6948 
6949 	ev.bus = hdev->bus;
6950 
6951 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6952 			 HCI_MGMT_EXT_INDEX_EVENTS);
6953 }
6954 
6955 void mgmt_index_removed(struct hci_dev *hdev)
6956 {
6957 	struct mgmt_ev_ext_index ev;
6958 	u8 status = MGMT_STATUS_INVALID_INDEX;
6959 
6960 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6961 		return;
6962 
6963 	switch (hdev->dev_type) {
6964 	case HCI_PRIMARY:
6965 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6966 
6967 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6968 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6969 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6970 			ev.type = 0x01;
6971 		} else {
6972 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6973 					 HCI_MGMT_INDEX_EVENTS);
6974 			ev.type = 0x00;
6975 		}
6976 		break;
6977 	case HCI_AMP:
6978 		ev.type = 0x02;
6979 		break;
6980 	default:
6981 		return;
6982 	}
6983 
6984 	ev.bus = hdev->bus;
6985 
6986 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6987 			 HCI_MGMT_EXT_INDEX_EVENTS);
6988 }
6989 
6990 /* This function requires the caller holds hdev->lock */
6991 static void restart_le_actions(struct hci_dev *hdev)
6992 {
6993 	struct hci_conn_params *p;
6994 
6995 	list_for_each_entry(p, &hdev->le_conn_params, list) {
6996 		/* Needed for AUTO_OFF case where might not "really"
6997 		 * have been powered off.
6998 		 */
6999 		list_del_init(&p->action);
7000 
7001 		switch (p->auto_connect) {
7002 		case HCI_AUTO_CONN_DIRECT:
7003 		case HCI_AUTO_CONN_ALWAYS:
7004 			list_add(&p->action, &hdev->pend_le_conns);
7005 			break;
7006 		case HCI_AUTO_CONN_REPORT:
7007 			list_add(&p->action, &hdev->pend_le_reports);
7008 			break;
7009 		default:
7010 			break;
7011 		}
7012 	}
7013 }
7014 
/* Finalize a power-on attempt: on success restore the LE auto-connect
 * action lists and kick background scanning, then answer all pending
 * Set Powered commands.  Takes hdev->lock itself.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* settings_rsp records the requesting socket in match.sk, which
	 * is then passed on to new_settings() -- presumably so that the
	 * requester is not notified twice; confirm against new_settings().
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7037 
/* Clean up management state when the controller goes down: answer
 * pending Set Powered commands, fail all other pending commands,
 * broadcast a zero class of device and the new settings.
 *
 * NOTE(review): no locking here -- the double-underscore prefix
 * suggests the caller holds hdev->lock; confirm at the call sites.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the class change if it was non-zero before. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7071 
7072 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7073 {
7074 	struct mgmt_pending_cmd *cmd;
7075 	u8 status;
7076 
7077 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7078 	if (!cmd)
7079 		return;
7080 
7081 	if (err == -ERFKILL)
7082 		status = MGMT_STATUS_RFKILLED;
7083 	else
7084 		status = MGMT_STATUS_FAILED;
7085 
7086 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7087 
7088 	mgmt_pending_remove(cmd);
7089 }
7090 
7091 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7092 		       bool persistent)
7093 {
7094 	struct mgmt_ev_new_link_key ev;
7095 
7096 	memset(&ev, 0, sizeof(ev));
7097 
7098 	ev.store_hint = persistent;
7099 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7100 	ev.key.addr.type = BDADDR_BREDR;
7101 	ev.key.type = key->type;
7102 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7103 	ev.key.pin_len = key->pin_len;
7104 
7105 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7106 }
7107 
7108 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7109 {
7110 	switch (ltk->type) {
7111 	case SMP_LTK:
7112 	case SMP_LTK_SLAVE:
7113 		if (ltk->authenticated)
7114 			return MGMT_LTK_AUTHENTICATED;
7115 		return MGMT_LTK_UNAUTHENTICATED;
7116 	case SMP_LTK_P256:
7117 		if (ltk->authenticated)
7118 			return MGMT_LTK_P256_AUTH;
7119 		return MGMT_LTK_P256_UNAUTH;
7120 	case SMP_LTK_P256_DEBUG:
7121 		return MGMT_LTK_P256_DEBUG;
7122 	}
7123 
7124 	return MGMT_LTK_UNAUTHENTICATED;
7125 }
7126 
/* Emit a New Long Term Key event; store_hint tells userspace whether
 * the key should be persisted.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_SLAVE) marks the master key. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7169 
7170 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7171 {
7172 	struct mgmt_ev_new_irk ev;
7173 
7174 	memset(&ev, 0, sizeof(ev));
7175 
7176 	ev.store_hint = persistent;
7177 
7178 	bacpy(&ev.rpa, &irk->rpa);
7179 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7180 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7181 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7182 
7183 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7184 }
7185 
7186 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7187 		   bool persistent)
7188 {
7189 	struct mgmt_ev_new_csrk ev;
7190 
7191 	memset(&ev, 0, sizeof(ev));
7192 
7193 	/* Devices using resolvable or non-resolvable random addresses
7194 	 * without providing an identity resolving key don't require
7195 	 * to store signature resolving keys. Their addresses will change
7196 	 * the next time around.
7197 	 *
7198 	 * Only when a remote device provides an identity address
7199 	 * make sure the signature resolving key is stored. So allow
7200 	 * static random and public addresses here.
7201 	 */
7202 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7203 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7204 		ev.store_hint = 0x00;
7205 	else
7206 		ev.store_hint = persistent;
7207 
7208 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7209 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7210 	ev.key.type = csrk->type;
7211 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7212 
7213 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7214 }
7215 
7216 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7217 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7218 			 u16 max_interval, u16 latency, u16 timeout)
7219 {
7220 	struct mgmt_ev_new_conn_param ev;
7221 
7222 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
7223 		return;
7224 
7225 	memset(&ev, 0, sizeof(ev));
7226 	bacpy(&ev.addr.bdaddr, bdaddr);
7227 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7228 	ev.store_hint = store_hint;
7229 	ev.min_interval = cpu_to_le16(min_interval);
7230 	ev.max_interval = cpu_to_le16(max_interval);
7231 	ev.latency = cpu_to_le16(latency);
7232 	ev.timeout = cpu_to_le16(timeout);
7233 
7234 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7235 }
7236 
/* Send a Device Connected event including whatever EIR data is known
 * for the remote device (received LE advertising data, or the remote
 * name and class of device for BR/EDR).
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* 512 bytes hold the fixed event header plus the EIR payload
	 * built below (name_len fits in a u8, plus a 5-byte class field)
	 * -- NOTE(review): also assumes conn->le_adv_data_len stays well
	 * below this; confirm where le_adv_data is filled in.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class of device if it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7273 
7274 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7275 {
7276 	struct sock **sk = data;
7277 
7278 	cmd->cmd_complete(cmd, 0);
7279 
7280 	*sk = cmd->sk;
7281 	sock_hold(*sk);
7282 
7283 	mgmt_pending_remove(cmd);
7284 }
7285 
7286 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7287 {
7288 	struct hci_dev *hdev = data;
7289 	struct mgmt_cp_unpair_device *cp = cmd->param;
7290 
7291 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7292 
7293 	cmd->cmd_complete(cmd, 0);
7294 	mgmt_pending_remove(cmd);
7295 }
7296 
7297 bool mgmt_powering_down(struct hci_dev *hdev)
7298 {
7299 	struct mgmt_pending_cmd *cmd;
7300 	struct mgmt_mode *cp;
7301 
7302 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7303 	if (!cmd)
7304 		return false;
7305 
7306 	cp = cmd->param;
7307 	if (!cp->val)
7308 		return true;
7309 
7310 	return false;
7311 }
7312 
/* Send a Device Disconnected event and resolve any pending Disconnect
 * and Unpair Device commands.  Also expedites the power-off work when
 * this was the last connection during a mgmt-initiated power down.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report disconnections userspace knew about. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() stores the requesting socket (with a held
	 * reference) in sk so the event can be suppressed for it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7348 
7349 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7350 			    u8 link_type, u8 addr_type, u8 status)
7351 {
7352 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7353 	struct mgmt_cp_disconnect *cp;
7354 	struct mgmt_pending_cmd *cmd;
7355 
7356 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7357 			     hdev);
7358 
7359 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7360 	if (!cmd)
7361 		return;
7362 
7363 	cp = cmd->param;
7364 
7365 	if (bacmp(bdaddr, &cp->addr.bdaddr))
7366 		return;
7367 
7368 	if (cp->addr.type != bdaddr_type)
7369 		return;
7370 
7371 	cmd->cmd_complete(cmd, mgmt_status(status));
7372 	mgmt_pending_remove(cmd);
7373 }
7374 
7375 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7376 			 u8 addr_type, u8 status)
7377 {
7378 	struct mgmt_ev_connect_failed ev;
7379 
7380 	/* The connection is still in hci_conn_hash so test for 1
7381 	 * instead of 0 to know if this is the last one.
7382 	 */
7383 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7384 		cancel_delayed_work(&hdev->power_off);
7385 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7386 	}
7387 
7388 	bacpy(&ev.addr.bdaddr, bdaddr);
7389 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7390 	ev.status = mgmt_status(status);
7391 
7392 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7393 }
7394 
7395 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7396 {
7397 	struct mgmt_ev_pin_code_request ev;
7398 
7399 	bacpy(&ev.addr.bdaddr, bdaddr);
7400 	ev.addr.type = BDADDR_BREDR;
7401 	ev.secure = secure;
7402 
7403 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7404 }
7405 
7406 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7407 				  u8 status)
7408 {
7409 	struct mgmt_pending_cmd *cmd;
7410 
7411 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7412 	if (!cmd)
7413 		return;
7414 
7415 	cmd->cmd_complete(cmd, mgmt_status(status));
7416 	mgmt_pending_remove(cmd);
7417 }
7418 
7419 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7420 				      u8 status)
7421 {
7422 	struct mgmt_pending_cmd *cmd;
7423 
7424 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7425 	if (!cmd)
7426 		return;
7427 
7428 	cmd->cmd_complete(cmd, mgmt_status(status));
7429 	mgmt_pending_remove(cmd);
7430 }
7431 
7432 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7433 			      u8 link_type, u8 addr_type, u32 value,
7434 			      u8 confirm_hint)
7435 {
7436 	struct mgmt_ev_user_confirm_request ev;
7437 
7438 	BT_DBG("%s", hdev->name);
7439 
7440 	bacpy(&ev.addr.bdaddr, bdaddr);
7441 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7442 	ev.confirm_hint = confirm_hint;
7443 	ev.value = cpu_to_le32(value);
7444 
7445 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7446 			  NULL);
7447 }
7448 
7449 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7450 			      u8 link_type, u8 addr_type)
7451 {
7452 	struct mgmt_ev_user_passkey_request ev;
7453 
7454 	BT_DBG("%s", hdev->name);
7455 
7456 	bacpy(&ev.addr.bdaddr, bdaddr);
7457 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7458 
7459 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7460 			  NULL);
7461 }
7462 
7463 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7464 				      u8 link_type, u8 addr_type, u8 status,
7465 				      u8 opcode)
7466 {
7467 	struct mgmt_pending_cmd *cmd;
7468 
7469 	cmd = pending_find(opcode, hdev);
7470 	if (!cmd)
7471 		return -ENOENT;
7472 
7473 	cmd->cmd_complete(cmd, mgmt_status(status));
7474 	mgmt_pending_remove(cmd);
7475 
7476 	return 0;
7477 }
7478 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7485 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7493 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7500 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7508 
7509 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7510 			     u8 link_type, u8 addr_type, u32 passkey,
7511 			     u8 entered)
7512 {
7513 	struct mgmt_ev_passkey_notify ev;
7514 
7515 	BT_DBG("%s", hdev->name);
7516 
7517 	bacpy(&ev.addr.bdaddr, bdaddr);
7518 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7519 	ev.passkey = __cpu_to_le32(passkey);
7520 	ev.entered = entered;
7521 
7522 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7523 }
7524 
7525 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7526 {
7527 	struct mgmt_ev_auth_failed ev;
7528 	struct mgmt_pending_cmd *cmd;
7529 	u8 status = mgmt_status(hci_status);
7530 
7531 	bacpy(&ev.addr.bdaddr, &conn->dst);
7532 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7533 	ev.status = status;
7534 
7535 	cmd = find_pairing(conn);
7536 
7537 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7538 		    cmd ? cmd->sk : NULL);
7539 
7540 	if (cmd) {
7541 		cmd->cmd_complete(cmd, status);
7542 		mgmt_pending_remove(cmd);
7543 	}
7544 }
7545 
/* Handle completion of an authentication-enable change on the
 * controller: sync the HCI_LINK_SECURITY flag with the controller's
 * HCI_AUTH state and answer all pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On controller failure just fail every pending command */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt flag, noting whether
	 * the flag actually changed (test-and-set/clear is atomic).
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference recorded during the iteration, if any */
	if (match.sk)
		sock_put(match.sk);
}
7572 
7573 static void clear_eir(struct hci_request *req)
7574 {
7575 	struct hci_dev *hdev = req->hdev;
7576 	struct hci_cp_write_eir cp;
7577 
7578 	if (!lmp_ext_inq_capable(hdev))
7579 		return;
7580 
7581 	memset(hdev->eir, 0, sizeof(hdev->eir));
7582 
7583 	memset(&cp, 0, sizeof(cp));
7584 
7585 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7586 }
7587 
/* Handle completion of a Secure Simple Pairing mode change on the
 * controller: update the HCI_SSP_ENABLED/HCI_HS_ENABLED flags, answer
 * pending Set SSP commands and refresh (or clear) the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed, roll back the optimistically-set
		 * flag and let userspace know the settings reverted.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; track whether
		 * either flag actually changed.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference recorded during the iteration, if any */
	if (match.sk)
		sock_put(match.sk);

	/* Keep the EIR data consistent with the (possibly new) SSP state */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7640 
7641 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7642 {
7643 	struct cmd_lookup *match = data;
7644 
7645 	if (match->sk == NULL) {
7646 		match->sk = cmd->sk;
7647 		sock_hold(match->sk);
7648 	}
7649 }
7650 
/* Handle completion of a Class of Device update: locate the socket of
 * whichever command triggered it (Set Dev Class, Add UUID or Remove
 * UUID) and, on success, broadcast the new 3-byte class value.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* sk_lookup records (and holds a reference on) the first
	 * matching command's socket across all three opcodes.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7669 
/* Handle completion of a local name change on the controller.
 *
 * When no Set Local Name command is pending the change originated in
 * the kernel itself; in that case the name is stored and the event is
 * suppressed while the device is still powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	/* The Set Local Name command payload doubles as the event payload */
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7697 
7698 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7699 {
7700 	int i;
7701 
7702 	for (i = 0; i < uuid_count; i++) {
7703 		if (!memcmp(uuid, uuids[i], 16))
7704 			return true;
7705 	}
7706 
7707 	return false;
7708 }
7709 
/* Check whether any UUID advertised in the EIR/advertising data @eir
 * matches one of the @uuid_count 128-bit UUIDs in @uuids.
 *
 * 16-bit and 32-bit UUIDs from the data are expanded to 128-bit form by
 * inserting their bytes (little-endian on the wire) into bytes 12-13
 * resp. 12-15 of the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	/* EIR data is a sequence of [length][type][payload] fields */
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field marks the end of significant data */
		if (field_len == 0)
			break;

		/* Stop if the field would run past the end of the data */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Payload is a list of 2-byte UUIDs at offset 2 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Payload is a list of 4-byte UUIDs at offset 2 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Payload is a list of full 16-byte UUIDs */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7764 
/* Queue a delayed restart of an ongoing LE scan. Used with controllers
 * having HCI_QUIRK_STRICT_DUPLICATE_FILTER set (see is_filter_match())
 * to obtain fresh advertising reports with updated RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Don't bother restarting when the delayed restart would land
	 * after the current scan window has already ended.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7779 
/* Apply the active service-discovery filter (RSSI threshold and/or UUID
 * list) to a scan result. Returns true if the result should be
 * reported to userspace, false if it should be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7824 
/* Report a discovered remote device to userspace as a Device Found
 * event, after applying discovery-state, filter and limited-discovery
 * checks. EIR/advertising data, an optional Class of Device field and
 * any scan response data are concatenated into the event's eir buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only if the data doesn't already
	 * carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7906 
7907 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7908 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7909 {
7910 	struct mgmt_ev_device_found *ev;
7911 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7912 	u16 eir_len;
7913 
7914 	ev = (struct mgmt_ev_device_found *) buf;
7915 
7916 	memset(buf, 0, sizeof(buf));
7917 
7918 	bacpy(&ev->addr.bdaddr, bdaddr);
7919 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7920 	ev->rssi = rssi;
7921 
7922 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7923 				  name_len);
7924 
7925 	ev->eir_len = cpu_to_le16(eir_len);
7926 
7927 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7928 }
7929 
7930 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7931 {
7932 	struct mgmt_ev_discovering ev;
7933 
7934 	BT_DBG("%s discovering %u", hdev->name, discovering);
7935 
7936 	memset(&ev, 0, sizeof(ev));
7937 	ev.type = hdev->discovery.type;
7938 	ev.discovering = discovering;
7939 
7940 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7941 }
7942 
/* The mgmt interface is served over the HCI control channel */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
7949 
/* Register the mgmt control channel with the HCI core. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7954 
/* Unregister the mgmt control channel from the HCI core. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
7959