// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google Corporation
 */

#include <linux/devcoredump.h>

#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

enum hci_devcoredump_pkt_type {
	HCI_DEVCOREDUMP_PKT_INIT,
	HCI_DEVCOREDUMP_PKT_SKB,
	HCI_DEVCOREDUMP_PKT_PATTERN,
	HCI_DEVCOREDUMP_PKT_COMPLETE,
	HCI_DEVCOREDUMP_PKT_ABORT,
};

struct hci_devcoredump_skb_cb {
	u16 pkt_type;
};

struct hci_devcoredump_skb_pattern {
	u8 pattern;
	u32 len;
} __packed;

#define hci_dmp_cb(skb)	((struct hci_devcoredump_skb_cb *)((skb)->cb))

#define DBG_UNEXPECTED_STATE() \
	bt_dev_dbg(hdev, \
		   "Unexpected packet (%d) for state (%d)", \
		   hci_dmp_cb(skb)->pkt_type, hdev->dump.state)

#define MAX_DEVCOREDUMP_HDR_SIZE	512	/* bytes */

static int hci_devcd_update_hdr_state(char *buf, size_t size, int state)
{
	int len = 0;

	if (!buf)
		return 0;

	len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state);

	/* Count the trailing NUL written by scnprintf as well, so the state
	 * field can be rewritten in place later.
	 */
	return len + 1;
}

/* Call with hci_dev_lock held. */
static int hci_devcd_update_state(struct hci_dev *hdev, int state)
{
	bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.",
		   hdev->dump.state, state);

	hdev->dump.state = state;

	return hci_devcd_update_hdr_state(hdev->dump.head,
					  hdev->dump.alloc_size, state);
}

static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb)
{
	char dump_start[] = "--- Start dump ---\n";
	char hdr[80];
	int hdr_len;

	hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr),
					     HCI_DEVCOREDUMP_IDLE);
	skb_put_data(skb, hdr, hdr_len);

	if (hdev->dump.dmp_hdr)
		hdev->dump.dmp_hdr(hdev, skb);

	skb_put_data(skb, dump_start, strlen(dump_start));

	return skb->len;
}
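
/* For illustration, the header assembled above lays out as follows. The
 * middle section depends entirely on the registered dmp_hdr() callback, so
 * the exact lines vary by vendor:
 *
 *	Bluetooth devcoredump
 *	State: 0
 *	<NUL, kept so the State line can be rewritten in place>
 *	<driver-specific header lines from dmp_hdr()>
 *	--- Start dump ---
 */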

/* Do not call with hci_dev_lock held, since this calls into driver code. */
static void hci_devcd_notify(struct hci_dev *hdev, int state)
{
	if (hdev->dump.notify_change)
		hdev->dump.notify_change(hdev, state);
}

/* Call with hci_dev_lock held. */
void hci_devcd_reset(struct hci_dev *hdev)
{
	hdev->dump.head = NULL;
	hdev->dump.tail = NULL;
	hdev->dump.alloc_size = 0;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	cancel_delayed_work(&hdev->dump.dump_timeout);
	skb_queue_purge(&hdev->dump.dump_q);
}

/* Call with hci_dev_lock held. */
static void hci_devcd_free(struct hci_dev *hdev)
{
	vfree(hdev->dump.head);	/* vfree() tolerates NULL */

	hci_devcd_reset(hdev);
}

/* Call with hci_dev_lock held. */
static int hci_devcd_alloc(struct hci_dev *hdev, u32 size)
{
	hdev->dump.head = vmalloc(size);
	if (!hdev->dump.head)
		return -ENOMEM;

	hdev->dump.alloc_size = size;
	hdev->dump.tail = hdev->dump.head;
	hdev->dump.end = hdev->dump.head + size;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	return 0;
}

/* Call with hci_dev_lock held. */
static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size)
{
	if (hdev->dump.tail + size > hdev->dump.end)
		return false;

	memcpy(hdev->dump.tail, buf, size);
	hdev->dump.tail += size;

	return true;
}

/* Call with hci_dev_lock held. */
static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len)
{
	if (hdev->dump.tail + len > hdev->dump.end)
		return false;

	memset(hdev->dump.tail, pattern, len);
	hdev->dump.tail += len;

	return true;
}

/* Call with hci_dev_lock held. */
static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;
	int dump_hdr_size;
	int err = 0;

	skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dump_hdr_size = hci_devcd_mkheader(hdev, skb);

	if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) {
		err = -ENOMEM;
		goto hdr_free;
	}

	/* Insert the device header */
	if (!hci_devcd_copy(hdev, skb->data, skb->len)) {
		bt_dev_err(hdev, "Failed to insert header");
		hci_devcd_free(hdev);

		err = -ENOMEM;
		goto hdr_free;
	}

hdr_free:
	kfree_skb(skb);

	return err;
}

static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(dump_size)) {
		bt_dev_dbg(hdev, "Invalid dump init pkt");
		return;
	}

	dump_size = get_unaligned_le32(skb_pull_data(skb, 4));
	if (!dump_size) {
		bt_dev_err(hdev, "Zero size dump init pkt");
		return;
	}

	if (hci_devcd_prepare(hdev, dump_size)) {
		bt_dev_err(hdev, "Failed to prepare for dump");
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE);
	queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout,
			   hdev->dump.timeout);
}

static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (!hci_devcd_copy(hdev, skb->data, skb->len))
		bt_dev_dbg(hdev, "Failed to insert skb");
}

static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_devcoredump_skb_pattern *pattern;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(*pattern)) {
		bt_dev_dbg(hdev, "Invalid pattern skb");
		return;
	}

	pattern = skb_pull_data(skb, sizeof(*pattern));

	if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len))
		bt_dev_dbg(hdev, "Failed to set pattern");
}

static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}

static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}

/* Bluetooth devcoredump state machine.
 *
 * Devcoredump states:
 *
 *      HCI_DEVCOREDUMP_IDLE: The default state.
 *
 *      HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has
 *              been initialized using hci_devcd_init(). Once active, the driver
 *              can append data using hci_devcd_append() or insert a pattern
 *              using hci_devcd_append_pattern().
 *
 *      HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver
 *              can signal the completion using hci_devcd_complete(). A
 *              devcoredump is generated indicating the completion event and
 *              then the state machine is reset to the default state.
 *
 *      HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in
 *              case of any error using hci_devcd_abort(). A devcoredump is
 *              still generated with the available data indicating the abort
 *              event and then the state machine is reset to the default state.
 *
 *      HCI_DEVCOREDUMP_TIMEOUT: A timeout timer (hdev->dump.timeout, set to
 *              DEVCOREDUMP_TIMEOUT by default) is started during devcoredump
 *              initialization. Once the timeout occurs, the driver is
 *              notified, a devcoredump is generated with the available data
 *              indicating the timeout event and then the state machine is
 *              reset to the default state.
 *
 * The driver must register using hci_devcd_register() before using the HCI
 * devcoredump APIs.
 */
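
/* Illustrative driver-side flow through the states above (a sketch only;
 * error handling is elided and dump_size/pad_len are hypothetical values):
 *
 *	hci_devcd_init(hdev, dump_size);		// IDLE -> ACTIVE
 *	hci_devcd_append(hdev, skb);			// queue a dump fragment
 *	hci_devcd_append_pattern(hdev, 0x00, pad_len);	// zero-fill a gap
 *	hci_devcd_complete(hdev);			// ACTIVE -> DONE, dump emitted
 *
 *	// or, on a collection failure:
 *	hci_devcd_abort(hdev);				// ACTIVE -> ABORT, partial dump emitted
 */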
void hci_devcd_rx(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
	struct sk_buff *skb;
	int start_state;

	while ((skb = skb_dequeue(&hdev->dump.dump_q))) {
		/* Return if timeout occurs. The timeout handler function
		 * hci_devcd_timeout() will report the available dump data.
		 */
		if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) {
			kfree_skb(skb);
			return;
		}

		hci_dev_lock(hdev);
		start_state = hdev->dump.state;

		switch (hci_dmp_cb(skb)->pkt_type) {
		case HCI_DEVCOREDUMP_PKT_INIT:
			hci_devcd_handle_pkt_init(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_SKB:
			hci_devcd_handle_pkt_skb(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_PATTERN:
			hci_devcd_handle_pkt_pattern(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_COMPLETE:
			hci_devcd_handle_pkt_complete(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_ABORT:
			hci_devcd_handle_pkt_abort(hdev, skb);
			break;

		default:
			bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d)",
				   hci_dmp_cb(skb)->pkt_type, hdev->dump.state);
			break;
		}

		hci_dev_unlock(hdev);
		kfree_skb(skb);

		/* Notify the driver about any state changes before resetting
		 * the state machine
		 */
		if (start_state != hdev->dump.state)
			hci_devcd_notify(hdev, hdev->dump.state);

		/* Reset the state machine if the devcoredump is complete */
		hci_dev_lock(hdev);
		if (hdev->dump.state == HCI_DEVCOREDUMP_DONE ||
		    hdev->dump.state == HCI_DEVCOREDUMP_ABORT)
			hci_devcd_reset(hdev);
		hci_dev_unlock(hdev);
	}
}
EXPORT_SYMBOL(hci_devcd_rx);

void hci_devcd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    dump.dump_timeout.work);
	u32 dump_size;

	hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	hci_dev_lock(hdev);

	cancel_work(&hdev->dump.dump_rx);

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	dump_size = hdev->dump.tail - hdev->dump.head;
	bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);

	hci_devcd_reset(hdev);

	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(hci_devcd_timeout);

int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
		       dmp_hdr_t dmp_hdr, notify_change_t notify_change)
{
	/* A driver must implement the coredump() and dmp_hdr() callbacks to
	 * support Bluetooth devcoredump. coredump() should trigger a coredump
	 * event on the controller when the device's coredump sysfs entry is
	 * written to, and dmp_hdr() should create a dump header identifying
	 * the controller/firmware/driver. notify_change() is optional.
	 */
	if (!coredump || !dmp_hdr)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->dump.coredump = coredump;
	hdev->dump.dmp_hdr = dmp_hdr;
	hdev->dump.notify_change = notify_change;
	hdev->dump.supported = true;
	hdev->dump.timeout = DEVCOREDUMP_TIMEOUT;
	hci_dev_unlock(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_register);
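
/* Example registration (an illustrative sketch only; the "foo" callbacks and
 * the vendor command they would send are hypothetical, not part of this API):
 *
 *	static void foo_coredump(struct hci_dev *hdev)
 *	{
 *		// Kick the controller into dumping its state, e.g. via a
 *		// vendor-specific HCI command.
 *	}
 *
 *	static void foo_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
 *	{
 *		char buf[64];
 *		int len;
 *
 *		len = scnprintf(buf, sizeof(buf), "Controller Name: foo\n");
 *		skb_put_data(skb, buf, len);
 *	}
 *
 *	err = hci_devcd_register(hdev, foo_coredump, foo_dmp_hdr, NULL);
 */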

static inline bool hci_devcd_enabled(struct hci_dev *hdev)
{
	return hdev->dump.supported;
}

int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT;
	put_unaligned_le32(dump_size, skb_put(skb, 4));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_init);

int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!skb)
		return -ENOMEM;

	if (!hci_devcd_enabled(hdev)) {
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append);

int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len)
{
	struct hci_devcoredump_skb_pattern p;
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(p), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	p.pattern = pattern;
	p.len = len;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN;
	skb_put_data(skb, &p, sizeof(p));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append_pattern);
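
/* Usage note (illustrative): a pattern packet queues only the 5-byte
 * hci_devcoredump_skb_pattern descriptor, so it is a cheap way to represent
 * a large filled region without copying the data itself. For example, to
 * zero-fill a skipped region of hole_len bytes (a hypothetical length):
 *
 *	hci_devcd_append_pattern(hdev, 0x00, hole_len);
 */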

int hci_devcd_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_complete);

int hci_devcd_abort(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_abort);