/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2015 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
						 u16 opcode, u16 len, void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
	hdr->index = index;
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
			       unsigned int size)
{
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL);
	if (!skb)
		return skb;

	skb_reserve(skb, sizeof(struct mgmt_hdr));
	bt_cb(skb)->mgmt.hdev = hdev;
	bt_cb(skb)->mgmt.opcode = opcode;

	return skb;
}

int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
			struct sock *skip_sk)
{
	struct hci_dev *hdev;
	struct mgmt_hdr *hdr;
	int len;

	if (!skb)
		return -EINVAL;

	len = skb->len;
	hdev = bt_cb(skb)->mgmt.hdev;

	/* Time stamp */
	__net_timestamp(skb);

	/* Send just the data, without headers, to the monitor */
	if (channel == HCI_CHANNEL_CONTROL)
		hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode,
					    skb->data, skb->len,
					    skb_get_ktime(skb), flag, skip_sk);

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(len);

	hci_send_to_channel(channel, skb, flag, skip_sk);

	kfree_skb(skb);
	return 0;
}

int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
		    void *data, u16 data_len, int flag, struct sock *skip_sk)
{
	struct sk_buff *skb;

	skb = mgmt_alloc_skb(hdev, event, data_len);
	if (!skb)
		return -ENOMEM;

	if (data)
		skb_put_data(skb, data, data_len);

	return mgmt_send_event_skb(channel, skb, flag, skip_sk);
}
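/* Queue a MGMT_EV_CMD_STATUS event directly on the socket that issued the
 * command and mirror a copy to the monitor channel, tagged with the
 * socket's cookie.
 */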
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb, *mskb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
					 MGMT_EV_CMD_STATUS, sizeof(*ev), ev);
	if (mskb)
		skb->tstamp = mskb->tstamp;
	else
		__net_timestamp(skb);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	if (mskb) {
		hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(mskb);
	}

	return err;
}

int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
		      void *rp, size_t rp_len)
{
	struct sk_buff *skb, *mskb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
					 MGMT_EV_CMD_COMPLETE,
					 sizeof(*ev) + rp_len, ev);
	if (mskb)
		skb->tstamp = mskb->tstamp;
	else
		__net_timestamp(skb);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	if (mskb) {
		hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(mskb);
	}

	return err;
}

struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
					   struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (hci_sock_get_channel(cmd->sk) != channel)
			continue;
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel,
						u16 opcode,
						struct hci_dev *hdev,
						const void *data)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->user_data != data)
			continue;
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
			  void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
			  void *data)
{
	struct mgmt_pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}
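/* mgmt_pending_new() only allocates the command and copies its parameters;
 * use mgmt_pending_add() to also link it into hdev->mgmt_pending.
 */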
struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
					  struct hci_dev *hdev,
					  void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmemdup(data, len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	cmd->param_len = len;

	cmd->sk = sk;
	sock_hold(sk);

	return cmd;
}

struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					  struct hci_dev *hdev,
					  void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_new(sk, opcode, hdev, data, len);
	if (!cmd)
		return NULL;

	list_add_tail(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

void mgmt_mesh_foreach(struct hci_dev *hdev,
		       void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
		       void *data, struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx, *tmp;

	list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) {
		if (!sk || mesh_tx->sk == sk)
			cb(mesh_tx, data);
	}
}

struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;

	if (list_empty(&hdev->mesh_pending))
		return NULL;

	list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) {
		if (!sk || mesh_tx->sk == sk)
			return mesh_tx;
	}

	return NULL;
}

struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle)
{
	struct mgmt_mesh_tx *mesh_tx;

	if (list_empty(&hdev->mesh_pending))
		return NULL;

	list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) {
		if (mesh_tx->handle == handle)
			return mesh_tx;
	}

	return NULL;
}

struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;

	mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL);
	if (!mesh_tx)
		return NULL;

	/* Never hand out handle 0; skip it when the counter wraps */
	hdev->mesh_send_ref++;
	if (!hdev->mesh_send_ref)
		hdev->mesh_send_ref++;

	mesh_tx->handle = hdev->mesh_send_ref;
	mesh_tx->index = hdev->id;
	memcpy(mesh_tx->param, data, len);
	mesh_tx->param_len = len;
	mesh_tx->sk = sk;
	sock_hold(sk);

	list_add_tail(&mesh_tx->list, &hdev->mesh_pending);

	return mesh_tx;
}

void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx)
{
	list_del(&mesh_tx->list);
	sock_put(mesh_tx->sk);
	kfree(mesh_tx);
}