// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google Corporation
 */

#include <linux/devcoredump.h>

#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

enum hci_devcoredump_pkt_type {
	HCI_DEVCOREDUMP_PKT_INIT,
	HCI_DEVCOREDUMP_PKT_SKB,
	HCI_DEVCOREDUMP_PKT_PATTERN,
	HCI_DEVCOREDUMP_PKT_COMPLETE,
	HCI_DEVCOREDUMP_PKT_ABORT,
};

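/* Control data stored in skb->cb while a dump packet waits in dump_q */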
struct hci_devcoredump_skb_cb {
	u16 pkt_type;
};

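/* Payload of a PATTERN packet: expands to @len bytes of @pattern in the
 * dump buffer (see hci_devcd_handle_pkt_pattern()).
 */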
struct hci_devcoredump_skb_pattern {
	u8 pattern;
	u32 len;
} __packed;

#define hci_dmp_cb(skb) ((struct hci_devcoredump_skb_cb *)((skb)->cb))

#define DBG_UNEXPECTED_STATE() \
	bt_dev_dbg(hdev, \
		   "Unexpected packet (%d) for state (%d). ", \
		   hci_dmp_cb(skb)->pkt_type, hdev->dump.state)

#define MAX_DEVCOREDUMP_HDR_SIZE 512	/* bytes */

static int hci_devcd_update_hdr_state(char *buf, size_t size, int state)
{
	int len = 0;

	if (!buf)
		return 0;

	len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state);

	return len + 1; /* +1 for the '\0' that scnprintf appends */
}

/* Call with hci_dev_lock only. */
static int hci_devcd_update_state(struct hci_dev *hdev, int state)
{
	bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.",
		   hdev->dump.state, state);

	hdev->dump.state = state;

	return hci_devcd_update_hdr_state(hdev->dump.head,
					  hdev->dump.alloc_size, state);
}

static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb)
{
	char dump_start[] = "--- Start dump ---\n";
	char hdr[80];
	int hdr_len;

	hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr),
					     HCI_DEVCOREDUMP_IDLE);
	skb_put_data(skb, hdr, hdr_len);

	if (hdev->dump.dmp_hdr)
		hdev->dump.dmp_hdr(hdev, skb);

	skb_put_data(skb, dump_start, strlen(dump_start));

	return skb->len;
}
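
/* For illustration, the header assembled above looks like this, where the
 * middle lines come from the driver's dmp_hdr() callback and so vary by
 * vendor:
 *
 *	Bluetooth devcoredump
 *	State: 0
 *	<controller/fw/driver info from dmp_hdr()>
 *	--- Start dump ---
 */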

/* Do not call with hci_dev_lock since this calls driver code. */
static void hci_devcd_notify(struct hci_dev *hdev, int state)
{
	if (hdev->dump.notify_change)
		hdev->dump.notify_change(hdev, state);
}

/* Call with hci_dev_lock only. */
void hci_devcd_reset(struct hci_dev *hdev)
{
	hdev->dump.head = NULL;
	hdev->dump.tail = NULL;
	hdev->dump.alloc_size = 0;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	cancel_delayed_work(&hdev->dump.dump_timeout);
	skb_queue_purge(&hdev->dump.dump_q);
}

/* Call with hci_dev_lock only. */
static void hci_devcd_free(struct hci_dev *hdev)
{
	vfree(hdev->dump.head);

	hci_devcd_reset(hdev);
}

/* Call with hci_dev_lock only. */
static int hci_devcd_alloc(struct hci_dev *hdev, u32 size)
{
	hdev->dump.head = vmalloc(size);
	if (!hdev->dump.head)
		return -ENOMEM;

	hdev->dump.alloc_size = size;
	hdev->dump.tail = hdev->dump.head;
	hdev->dump.end = hdev->dump.head + size;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	return 0;
}
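
/* After a successful hci_devcd_alloc(), the dump buffer is tracked with
 * three pointers and filled from tail towards end:
 *
 *	head == tail                               end
 *	  |<------------- free space -------------->|
 *	  |<-------------- alloc_size ------------->|
 *
 * hci_devcd_copy() and hci_devcd_memset() advance tail; writes that would
 * run past end are rejected.
 */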

/* Call with hci_dev_lock only. */
static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size)
{
	if (hdev->dump.tail + size > hdev->dump.end)
		return false;

	memcpy(hdev->dump.tail, buf, size);
	hdev->dump.tail += size;

	return true;
}

/* Call with hci_dev_lock only. */
static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len)
{
	if (hdev->dump.tail + len > hdev->dump.end)
		return false;

	memset(hdev->dump.tail, pattern, len);
	hdev->dump.tail += len;

	return true;
}

/* Call with hci_dev_lock only. */
static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;
	int dump_hdr_size;
	int err = 0;

	skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dump_hdr_size = hci_devcd_mkheader(hdev, skb);

	if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) {
		err = -ENOMEM;
		goto hdr_free;
	}

	/* Insert the device header */
	if (!hci_devcd_copy(hdev, skb->data, skb->len)) {
		bt_dev_err(hdev, "Failed to insert header");
		hci_devcd_free(hdev);

		err = -ENOMEM;
		goto hdr_free;
	}

hdr_free:
	kfree_skb(skb);

	return err;
}

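/* The INIT payload is a single little-endian u32 giving the size of the
 * dump body the controller intends to send (built by hci_devcd_init()
 * below).
 */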
static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(dump_size)) {
		bt_dev_dbg(hdev, "Invalid dump init pkt");
		return;
	}

	dump_size = get_unaligned_le32(skb_pull_data(skb, 4));
	if (!dump_size) {
		bt_dev_err(hdev, "Zero size dump init pkt");
		return;
	}

	if (hci_devcd_prepare(hdev, dump_size)) {
		bt_dev_err(hdev, "Failed to prepare for dump");
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE);
	queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout,
			   hdev->dump.timeout);
}

static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (!hci_devcd_copy(hdev, skb->data, skb->len))
		bt_dev_dbg(hdev, "Failed to insert skb");
}

static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_devcoredump_skb_pattern *pattern;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(*pattern)) {
		bt_dev_dbg(hdev, "Invalid pattern skb");
		return;
	}

	pattern = skb_pull_data(skb, sizeof(*pattern));

	if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len))
		bt_dev_dbg(hdev, "Failed to set pattern");
}

static void hci_devcd_dump(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	u32 size;

	bt_dev_dbg(hdev, "state %d", hdev->dump.state);

	size = hdev->dump.tail - hdev->dump.head;

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, size, GFP_KERNEL);

	/* Send a copy to monitor as a diagnostic packet */
	skb = bt_skb_alloc(size, GFP_ATOMIC);
	if (skb) {
		skb_put_data(skb, hdev->dump.head, size);
		hci_recv_diag(hdev, skb);
	}
}

static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	hci_devcd_dump(hdev);
}

static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	hci_devcd_dump(hdev);
}

/* Bluetooth devcoredump state machine.
 *
 * Devcoredump states:
 *
 * HCI_DEVCOREDUMP_IDLE: The default state.
 *
 * HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has
 *	been initialized using hci_devcd_init(). Once active, the driver
 *	can append data using hci_devcd_append() or insert a pattern
 *	using hci_devcd_append_pattern().
 *
 * HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver
 *	can signal the completion using hci_devcd_complete(). A
 *	devcoredump is generated indicating the completion event and
 *	then the state machine is reset to the default state.
 *
 * HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in
 *	case of any error using hci_devcd_abort(). A devcoredump is
 *	still generated with the available data indicating the abort
 *	event and then the state machine is reset to the default state.
 *
 * HCI_DEVCOREDUMP_TIMEOUT: A timeout timer (hdev->dump.timeout, set to
 *	DEVCOREDUMP_TIMEOUT at registration) is started during devcoredump
 *	initialization. Once the timeout occurs, the driver is notified, a
 *	devcoredump is generated with the available data indicating the
 *	timeout event and then the state machine is reset to the default
 *	state.
 *
 * The driver must register using hci_devcd_register() before using the hci
 * devcoredump APIs.
 */
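
/* Compact view of the transitions implemented by the handlers above and by
 * hci_devcd_timeout() below (an illustrative summary, not new behavior):
 *
 *	IDLE ---init---> ACTIVE ---complete---> DONE ----reset---> IDLE
 *	                   |------- abort ----> ABORT ---reset---> IDLE
 *	                   '------ timeout ---> TIMEOUT -reset---> IDLE
 */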
void hci_devcd_rx(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
	struct sk_buff *skb;
	int start_state;

	while ((skb = skb_dequeue(&hdev->dump.dump_q))) {
		/* Return if timeout occurs. The timeout handler function
		 * hci_devcd_timeout() will report the available dump data.
		 */
		if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) {
			kfree_skb(skb);
			return;
		}

		hci_dev_lock(hdev);
		start_state = hdev->dump.state;

		switch (hci_dmp_cb(skb)->pkt_type) {
		case HCI_DEVCOREDUMP_PKT_INIT:
			hci_devcd_handle_pkt_init(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_SKB:
			hci_devcd_handle_pkt_skb(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_PATTERN:
			hci_devcd_handle_pkt_pattern(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_COMPLETE:
			hci_devcd_handle_pkt_complete(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_ABORT:
			hci_devcd_handle_pkt_abort(hdev, skb);
			break;

		default:
			bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). ",
				   hci_dmp_cb(skb)->pkt_type, hdev->dump.state);
			break;
		}

		hci_dev_unlock(hdev);
		kfree_skb(skb);

		/* Notify the driver about any state changes before resetting
		 * the state machine
		 */
		if (start_state != hdev->dump.state)
			hci_devcd_notify(hdev, hdev->dump.state);

		/* Reset the state machine if the devcoredump is complete */
		hci_dev_lock(hdev);
		if (hdev->dump.state == HCI_DEVCOREDUMP_DONE ||
		    hdev->dump.state == HCI_DEVCOREDUMP_ABORT)
			hci_devcd_reset(hdev);
		hci_dev_unlock(hdev);
	}
}
EXPORT_SYMBOL(hci_devcd_rx);

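/* Delayed work armed by hci_devcd_handle_pkt_init(). If the driver never
 * completes or aborts the dump, emit whatever was collected and reset the
 * state machine.
 */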
void hci_devcd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    dump.dump_timeout.work);
	u32 dump_size;

	hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	hci_dev_lock(hdev);

	cancel_work(&hdev->dump.dump_rx);

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	dump_size = hdev->dump.tail - hdev->dump.head;
	bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	hci_devcd_dump(hdev);

	hci_devcd_reset(hdev);

	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(hci_devcd_timeout);

int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
		       dmp_hdr_t dmp_hdr, notify_change_t notify_change)
{
	/* Driver must implement coredump() and dmp_hdr() functions for
	 * bluetooth devcoredump. The coredump() should trigger a coredump
	 * event on the controller when the device's coredump sysfs entry is
	 * written to. The dmp_hdr() should create a dump header to identify
	 * the controller/fw/driver info.
	 */
	if (!coredump || !dmp_hdr)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->dump.coredump = coredump;
	hdev->dump.dmp_hdr = dmp_hdr;
	hdev->dump.notify_change = notify_change;
	hdev->dump.supported = true;
	hdev->dump.timeout = DEVCOREDUMP_TIMEOUT;
	hci_dev_unlock(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_register);
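
/* A minimal registration sketch from a driver's probe path. The callback
 * names (my_coredump, my_dmp_hdr, my_notify_change) are hypothetical and
 * only illustrate the expected shape of the callbacks:
 *
 *	static void my_coredump(struct hci_dev *hdev)
 *	{
 *		// Ask the controller to start emitting dump data, e.g. via
 *		// a vendor command.
 *	}
 *
 *	static void my_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
 *	{
 *		char buf[64];
 *		int len;
 *
 *		len = snprintf(buf, sizeof(buf), "Controller Name: my_ctrl\n");
 *		skb_put_data(skb, buf, len);
 *	}
 *
 *	static void my_notify_change(struct hci_dev *hdev, int state)
 *	{
 *		// React to state changes, e.g. stop vendor timers on abort.
 *	}
 *
 *	err = hci_devcd_register(hdev, my_coredump, my_dmp_hdr,
 *				 my_notify_change);
 */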

static inline bool hci_devcd_enabled(struct hci_dev *hdev)
{
	return hdev->dump.supported;
}

int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT;
	put_unaligned_le32(dump_size, skb_put(skb, 4));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_init);

int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!skb)
		return -ENOMEM;

	if (!hci_devcd_enabled(hdev)) {
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append);

int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len)
{
	struct hci_devcoredump_skb_pattern p;
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(p), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	p.pattern = pattern;
	p.len = len;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN;
	skb_put_data(skb, &p, sizeof(p));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append_pattern);
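
/* Illustrative use (not taken from a specific driver): fill a region the
 * controller cannot ship over the transport instead of streaming it:
 *
 *	hci_devcd_append_pattern(hdev, 0x00, 4096);	// 4 KiB of zeroes
 */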

int hci_devcd_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_complete);

int hci_devcd_abort(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_abort);
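
/* Putting it together: a typical collection sequence from a hypothetical
 * driver's vendor-event handler, assuming the controller reports the total
 * dump size first and then streams fragments in skbs (a minimal sketch,
 * not a specific driver's flow):
 *
 *	hci_devcd_init(hdev, dump_size);	// IDLE -> ACTIVE
 *
 *	// for each data fragment received from the controller:
 *	hci_devcd_append(hdev, skb);		// consumes the skb
 *
 *	// on the controller's end-of-dump marker:
 *	hci_devcd_complete(hdev);		// ACTIVE -> DONE, dump emitted
 *
 *	// or on any error:
 *	hci_devcd_abort(hdev);			// ACTIVE -> ABORT, dump emitted
 */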