xref: /linux/drivers/hwtracing/coresight/coresight-cpu-debug.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2017 Linaro Limited. All rights reserved.
4  *
5  * Author: Leo Yan <leo.yan@linaro.org>
6  */
7 #include <linux/acpi.h>
8 #include <linux/amba/bus.h>
9 #include <linux/coresight.h>
10 #include <linux/cpu.h>
11 #include <linux/debugfs.h>
12 #include <linux/delay.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/init.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/panic_notifier.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_qos.h>
24 #include <linux/slab.h>
25 #include <linux/smp.h>
26 #include <linux/types.h>
27 #include <linux/uaccess.h>
28 
29 #include "coresight-priv.h"
30 
/* External debug interface register offsets (memory-mapped) */
#define EDPCSR				0x0A0
#define EDCIDSR				0x0A4
#define EDVIDSR				0x0A8
#define EDPCSR_HI			0x0AC
#define EDOSLAR				0x300
#define EDPRCR				0x310
#define EDPRSR				0x314
#define EDDEVID1			0xFC4
#define EDDEVID				0xFC8

/* EDPCSR reads as all-ones when PE is in debug state or sampling prohibited */
#define EDPCSR_PROHIBITED		0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB			BIT(0)
#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ			BIT(3)
#define EDPRCR_CORENPDRQ		BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK			BIT(6)
#define EDPRSR_PU			BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS			BIT(31)
#define EDVIDSR_E2			BIT(30)
#define EDVIDSR_E3			BIT(29)
#define EDVIDSR_HV			BIT(28)
#define EDVIDSR_VMID			GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PSCROffset
 *
 * NOTE: armv8 and armv7 have different definition for the register,
 * so consolidate the bits definition as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state, we
 *          rely on EDDEVID to check if EDPCSR is implemented or not
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 *
 */
#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR		(0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
#define EDDEVID_IMPL_FULL		(0x3)

/* Poll interval and timeout (usecs) used when waiting on EDPRSR.PU */
#define DEBUG_WAIT_SLEEP		1000
#define DEBUG_WAIT_TIMEOUT		32000
87 
/* Per-CPU device state for one external debug interface */
struct debug_drvdata {
	struct clk	*pclk;		/* APB clock (platform/ACPI devices only) */
	void __iomem	*base;		/* mapped debug register window */
	struct device	*dev;
	int		cpu;		/* CPU this debug interface belongs to */

	/* Feature flags decoded from EDDEVID/EDDEVID1 at probe time */
	bool		edpcsr_present;
	bool		edcidsr_present;
	bool		edvidsr_present;
	bool		pc_has_offset;

	/* Last snapshot of the sample registers, taken by debug_read_regs() */
	u32		edpcsr;
	u32		edpcsr_hi;
	u32		edprsr;
	u32		edvidsr;
	u32		edcidsr;
};

/* Serializes the enable knob, probe/remove and the panic dump */
static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
/* Number of successfully probed devices; gates debugfs/notifier setup */
static int debug_count;
static struct dentry *debug_debugfs_dir;

static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
114 
/* Clear the OS Lock (EDOSLAR) so external debug registers become accessible */
static void debug_os_unlock(struct debug_drvdata *drvdata)
{
	/* Unlocks the debug registers */
	writel_relaxed(0x0, drvdata->base + EDOSLAR);

	/* Make sure the registers are unlocked before accessing */
	wmb();
}
123 
124 /*
125  * According to ARM DDI 0487A.k, before access external debug
126  * registers should firstly check the access permission; if any
127  * below condition has been met then cannot access debug
128  * registers to avoid lockup issue:
129  *
130  * - CPU power domain is powered off;
131  * - The OS Double Lock is locked;
132  *
133  * By checking EDPRSR can get to know if meet these conditions.
134  */
135 static bool debug_access_permitted(struct debug_drvdata *drvdata)
136 {
137 	/* CPU is powered off */
138 	if (!(drvdata->edprsr & EDPRSR_PU))
139 		return false;
140 
141 	/* The OS Double Lock is locked */
142 	if (drvdata->edprsr & EDPRSR_DLK)
143 		return false;
144 
145 	return true;
146 }
147 
148 static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
149 {
150 	u32 edprcr;
151 
152 try_again:
153 
154 	/*
155 	 * Send request to power management controller and assert
156 	 * DBGPWRUPREQ signal; if power management controller has
157 	 * sane implementation, it should enable CPU power domain
158 	 * in case CPU is in low power state.
159 	 */
160 	edprcr = readl_relaxed(drvdata->base + EDPRCR);
161 	edprcr |= EDPRCR_COREPURQ;
162 	writel_relaxed(edprcr, drvdata->base + EDPRCR);
163 
164 	/* Wait for CPU to be powered up (timeout~=32ms) */
165 	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
166 			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
167 			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
168 		/*
169 		 * Unfortunately the CPU cannot be powered up, so return
170 		 * back and later has no permission to access other
171 		 * registers. For this case, should disable CPU low power
172 		 * states to ensure CPU power domain is enabled!
173 		 */
174 		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
175 			__func__, drvdata->cpu);
176 		return;
177 	}
178 
179 	/*
180 	 * At this point the CPU is powered up, so set the no powerdown
181 	 * request bit so we don't lose power and emulate power down.
182 	 */
183 	edprcr = readl_relaxed(drvdata->base + EDPRCR);
184 	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
185 	writel_relaxed(edprcr, drvdata->base + EDPRCR);
186 
187 	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
188 
189 	/* The core power domain got switched off on use, try again */
190 	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
191 		goto try_again;
192 }
193 
/*
 * Snapshot the PC sample registers for one CPU into drvdata.
 *
 * The sequence is order-sensitive: unlock (CS lock then OS lock), force
 * the power domain up, verify access is permitted, read EDPCSR first
 * (its read side-effect latches the companion registers), then read the
 * companions, and finally restore EDPRCR and re-lock.
 */
static void debug_read_regs(struct debug_drvdata *drvdata)
{
	u32 save_edprcr;

	CS_UNLOCK(drvdata->base);

	/* Unlock os lock */
	debug_os_unlock(drvdata);

	/* Save EDPRCR register so it can be restored on exit */
	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

	/*
	 * Ensure CPU power domain is enabled so the registers
	 * are accessible.
	 */
	debug_force_cpu_powered_up(drvdata);

	if (!debug_access_permitted(drvdata))
		goto out;

	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

	/*
	 * As described in ARM DDI 0487A.k, if the processing
	 * element (PE) is in debug state, or sample-based
	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
	 * EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
	 * UNKNOWN state. So directly bail out for this case.
	 */
	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
		goto out;

	/*
	 * A read of the EDPCSR normally has the side-effect of
	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
	 * at this point it's safe to read value from them.
	 */
	if (IS_ENABLED(CONFIG_64BIT))
		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

	if (drvdata->edcidsr_present)
		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

	if (drvdata->edvidsr_present)
		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
	/* Restore EDPRCR register */
	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

	CS_LOCK(drvdata->base);
}
247 
#ifdef CONFIG_64BIT
/*
 * On 64-bit the sampled PC is simply the EDPCSR_HI:EDPCSR pair; no
 * instruction-set offset adjustment is needed.
 */
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	return (unsigned long)drvdata->edpcsr_hi << 32 |
	       (unsigned long)drvdata->edpcsr;
}
#else
/*
 * On 32-bit, EDPCSR encodes the instruction set in its low bits and may
 * include an implementation offset (8 for ARM, 4 for Thumb) when
 * EDDEVID1.PCSROffset says the offset applies; strip both to recover
 * the actual PC.
 */
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
	unsigned long pc;

	pc = (unsigned long)drvdata->edpcsr;

	if (drvdata->pc_has_offset) {
		arm_inst_offset = 8;
		thumb_inst_offset = 4;
	}

	/* Handle thumb instruction: bit 0 set means Thumb state */
	if (pc & EDPCSR_THUMB) {
		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
		return pc;
	}

	/*
	 * Handle arm instruction offset, if the arm instruction
	 * is not 4 byte alignment then it's possible the case
	 * for implementation defined; keep original value for this
	 * case and print info for notice.
	 */
	if (pc & BIT(1))
		dev_emerg(drvdata->dev,
			  "Instruction offset is implementation defined\n");
	else
		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

	return pc;
}
#endif
288 
/*
 * Pretty-print the register snapshot previously captured by
 * debug_read_regs() for one CPU; used from the panic notifier.
 */
static void debug_dump_regs(struct debug_drvdata *drvdata)
{
	struct device *dev = drvdata->dev;
	unsigned long pc;

	dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
		  drvdata->edprsr,
		  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
		  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

	if (!debug_access_permitted(drvdata)) {
		dev_emerg(dev, "No permission to access debug registers!\n");
		return;
	}

	/* All-ones means the snapshot is invalid; nothing more to print */
	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
		dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
		return;
	}

	pc = debug_adjust_pc(drvdata);
	dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);

	if (drvdata->edcidsr_present)
		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

	if (drvdata->edvidsr_present)
		dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
			  drvdata->edvidsr,
			  drvdata->edvidsr & EDVIDSR_NS ?
			  "Non-secure" : "Secure",
			  drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
				(drvdata->edvidsr & EDVIDSR_E2 ?
				 "EL2" : "EL1/0"),
			  drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
			  drvdata->edvidsr & (u32)EDVIDSR_VMID);
}
326 
/*
 * Decode EDDEVID/EDDEVID1 to discover which sample registers this CPU
 * implements. Runs on the target CPU via smp_call_function_single().
 */
static void debug_init_arch_data(void *info)
{
	struct debug_drvdata *drvdata = info;
	u32 mode, pcsr_offset;
	u32 eddevid, eddevid1;

	CS_UNLOCK(drvdata->base);

	/* Read device info */
	eddevid  = readl_relaxed(drvdata->base + EDDEVID);
	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

	CS_LOCK(drvdata->base);

	/* Parse implementation feature */
	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

	drvdata->edpcsr_present  = false;
	drvdata->edcidsr_present = false;
	drvdata->edvidsr_present = false;
	drvdata->pc_has_offset   = false;

	/* Each higher mode is a superset; deliberate fallthroughs below */
	switch (mode) {
	case EDDEVID_IMPL_FULL:
		drvdata->edvidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
		drvdata->edcidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR:
		/*
		 * In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
		 * define if has the offset for PC sampling value; if read
		 * back EDDEVID1.PCSROffset == 0x2, then this means the debug
		 * module does not sample the instruction set state when
		 * armv8 CPU in AArch32 state.
		 */
		drvdata->edpcsr_present =
			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

		drvdata->pc_has_offset =
			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
		break;
	default:
		break;
	}
}
376 
377 /*
378  * Dump out information on panic.
379  */
380 static int debug_notifier_call(struct notifier_block *self,
381 			       unsigned long v, void *p)
382 {
383 	int cpu;
384 	struct debug_drvdata *drvdata;
385 
386 	/* Bail out if we can't acquire the mutex or the functionality is off */
387 	if (!mutex_trylock(&debug_lock))
388 		return NOTIFY_DONE;
389 
390 	if (!debug_enable)
391 		goto skip_dump;
392 
393 	pr_emerg("ARM external debug module:\n");
394 
395 	for_each_possible_cpu(cpu) {
396 		drvdata = per_cpu(debug_drvdata, cpu);
397 		if (!drvdata)
398 			continue;
399 
400 		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
401 
402 		debug_read_regs(drvdata);
403 		debug_dump_regs(drvdata);
404 	}
405 
406 skip_dump:
407 	mutex_unlock(&debug_lock);
408 	return NOTIFY_DONE;
409 }
410 
411 static struct notifier_block debug_notifier = {
412 	.notifier_call = debug_notifier_call,
413 };
414 
415 static int debug_enable_func(void)
416 {
417 	struct debug_drvdata *drvdata;
418 	int cpu, ret = 0;
419 	cpumask_t mask;
420 
421 	/*
422 	 * Use cpumask to track which debug power domains have
423 	 * been powered on and use it to handle failure case.
424 	 */
425 	cpumask_clear(&mask);
426 
427 	for_each_possible_cpu(cpu) {
428 		drvdata = per_cpu(debug_drvdata, cpu);
429 		if (!drvdata)
430 			continue;
431 
432 		ret = pm_runtime_get_sync(drvdata->dev);
433 		if (ret < 0)
434 			goto err;
435 		else
436 			cpumask_set_cpu(cpu, &mask);
437 	}
438 
439 	return 0;
440 
441 err:
442 	/*
443 	 * If pm_runtime_get_sync() has failed, need rollback on
444 	 * all the other CPUs that have been enabled before that.
445 	 */
446 	for_each_cpu(cpu, &mask) {
447 		drvdata = per_cpu(debug_drvdata, cpu);
448 		pm_runtime_put_noidle(drvdata->dev);
449 	}
450 
451 	return ret;
452 }
453 
454 static int debug_disable_func(void)
455 {
456 	struct debug_drvdata *drvdata;
457 	int cpu, ret, err = 0;
458 
459 	/*
460 	 * Disable debug power domains, records the error and keep
461 	 * circling through all other CPUs when an error has been
462 	 * encountered.
463 	 */
464 	for_each_possible_cpu(cpu) {
465 		drvdata = per_cpu(debug_drvdata, cpu);
466 		if (!drvdata)
467 			continue;
468 
469 		ret = pm_runtime_put(drvdata->dev);
470 		if (ret < 0)
471 			err = ret;
472 	}
473 
474 	return err;
475 }
476 
477 static ssize_t debug_func_knob_write(struct file *f,
478 		const char __user *buf, size_t count, loff_t *ppos)
479 {
480 	u8 val;
481 	int ret;
482 
483 	ret = kstrtou8_from_user(buf, count, 2, &val);
484 	if (ret)
485 		return ret;
486 
487 	mutex_lock(&debug_lock);
488 
489 	if (val == debug_enable)
490 		goto out;
491 
492 	if (val)
493 		ret = debug_enable_func();
494 	else
495 		ret = debug_disable_func();
496 
497 	if (ret) {
498 		pr_err("%s: unable to %s debug function: %d\n",
499 		       __func__, val ? "enable" : "disable", ret);
500 		goto err;
501 	}
502 
503 	debug_enable = val;
504 out:
505 	ret = count;
506 err:
507 	mutex_unlock(&debug_lock);
508 	return ret;
509 }
510 
511 static ssize_t debug_func_knob_read(struct file *f,
512 		char __user *ubuf, size_t count, loff_t *ppos)
513 {
514 	ssize_t ret;
515 	char buf[3];
516 
517 	mutex_lock(&debug_lock);
518 	snprintf(buf, sizeof(buf), "%d\n", debug_enable);
519 	mutex_unlock(&debug_lock);
520 
521 	ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
522 	return ret;
523 }
524 
525 static const struct file_operations debug_func_knob_fops = {
526 	.open	= simple_open,
527 	.read	= debug_func_knob_read,
528 	.write	= debug_func_knob_write,
529 };
530 
531 static int debug_func_init(void)
532 {
533 	int ret;
534 
535 	/* Create debugfs node */
536 	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
537 	debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
538 			    &debug_func_knob_fops);
539 
540 	/* Register function to be called for panic */
541 	ret = atomic_notifier_chain_register(&panic_notifier_list,
542 					     &debug_notifier);
543 	if (ret) {
544 		pr_err("%s: unable to register notifier: %d\n",
545 		       __func__, ret);
546 		goto err;
547 	}
548 
549 	return 0;
550 
551 err:
552 	debugfs_remove_recursive(debug_debugfs_dir);
553 	return ret;
554 }
555 
556 static void debug_func_exit(void)
557 {
558 	atomic_notifier_chain_unregister(&panic_notifier_list,
559 					 &debug_notifier);
560 	debugfs_remove_recursive(debug_debugfs_dir);
561 }
562 
/*
 * Common probe path shared by the AMBA and platform front-ends: map the
 * register window, bind the drvdata to its CPU slot, probe the hardware
 * features on that CPU, and set up the shared debugfs/notifier state on
 * first use.
 */
static int __debug_probe(struct device *dev, struct resource *res)
{
	struct debug_drvdata *drvdata = dev_get_drvdata(dev);
	void __iomem *base;
	int ret;

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	/* Each CPU may have at most one debug device bound to it */
	if (per_cpu(debug_drvdata, drvdata->cpu)) {
		dev_err(dev, "CPU%d drvdata has already been initialized\n",
			drvdata->cpu);
		return -EBUSY;
	}

	drvdata->dev = dev;
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	/* Hold the hotplug lock so the target CPU cannot go away while we
	 * cross-call debug_init_arch_data() on it. */
	cpus_read_lock();
	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
				       drvdata, 1);
	cpus_read_unlock();

	if (ret) {
		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
		goto err;
	}

	/* Without EDPCSR there is nothing to sample; refuse the device */
	if (!drvdata->edpcsr_present) {
		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
			drvdata->cpu);
		ret = -ENXIO;
		goto err;
	}

	/* First device: create debugfs knob and panic notifier */
	if (!debug_count++) {
		ret = debug_func_init();
		if (ret)
			goto err_func_init;
	}

	mutex_lock(&debug_lock);
	/* Turn off debug power domain if debugging is disabled */
	if (!debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
	return 0;

err_func_init:
	debug_count--;
err:
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
	return ret;
}
625 
626 static int debug_probe(struct amba_device *adev, const struct amba_id *id)
627 {
628 	struct debug_drvdata *drvdata;
629 
630 	drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
631 	if (!drvdata)
632 		return -ENOMEM;
633 
634 	amba_set_drvdata(adev, drvdata);
635 	return __debug_probe(&adev->dev, &adev->res);
636 }
637 
/*
 * Common remove path: release the CPU slot, balance the runtime PM
 * reference taken at probe (held only while debugging is enabled), and
 * tear down the shared debugfs/notifier state on last removal.
 */
static void __debug_remove(struct device *dev)
{
	struct debug_drvdata *drvdata = dev_get_drvdata(dev);

	per_cpu(debug_drvdata, drvdata->cpu) = NULL;

	mutex_lock(&debug_lock);
	/* Turn off debug power domain before rmmod the module */
	if (debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	/* Last device gone: unregister the notifier and remove debugfs */
	if (!--debug_count)
		debug_func_exit();
}

static void debug_remove(struct amba_device *adev)
{
	__debug_remove(&adev->dev);
}
658 
/* UCI match data used for parts that share a generic AMBA peripheral ID */
static const struct amba_cs_uci_id uci_id_debug[] = {
	{
		/*  CPU Debug UCI data */
		.devarch	= 0x47706a15,
		.devarch_mask	= 0xfff0ffff,
		.devtype	= 0x00000015,
	}
};

/* AMBA peripheral IDs of supported CPU debug modules */
static const struct amba_id debug_ids[] = {
	CS_AMBA_ID(0x000bbd03),				/* Cortex-A53 */
	CS_AMBA_ID(0x000bbd07),				/* Cortex-A57 */
	CS_AMBA_ID(0x000bbd08),				/* Cortex-A72 */
	CS_AMBA_ID(0x000bbd09),				/* Cortex-A73 */
	CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),	/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),	/* Qualcomm Kryo */
	{},
};

MODULE_DEVICE_TABLE(amba, debug_ids);

static struct amba_driver debug_driver = {
	.drv = {
		.name   = "coresight-cpu-debug",
		.suppress_bind_attrs = true,
	},
	.probe		= debug_probe,
	.remove		= debug_remove,
	.id_table	= debug_ids,
};
689 
/*
 * Platform/ACPI front-end: same common probe as the AMBA path, but the
 * APB clock and runtime PM state must be managed explicitly here.
 */
static int debug_platform_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct debug_drvdata *drvdata;
	int ret = 0;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	/* NOTE(review): this collapses the PTR_ERR code into -ENODEV —
	 * confirm callers don't need the original error (e.g. -EPROBE_DEFER) */
	drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
	if (IS_ERR(drvdata->pclk))
		return -ENODEV;

	dev_set_drvdata(&pdev->dev, drvdata);
	/* Mark the device active before enabling runtime PM so the common
	 * probe's pm_runtime_put() has a reference to drop */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = __debug_probe(&pdev->dev, res);
	if (ret) {
		/* Unwind the runtime PM setup and release the clock */
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		if (!IS_ERR_OR_NULL(drvdata->pclk))
			clk_put(drvdata->pclk);
	}
	return ret;
}

static void debug_platform_remove(struct platform_device *pdev)
{
	struct debug_drvdata *drvdata = dev_get_drvdata(&pdev->dev);

	if (WARN_ON(!drvdata))
		return;

	__debug_remove(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (!IS_ERR_OR_NULL(drvdata->pclk))
		clk_put(drvdata->pclk);
}
731 
#ifdef CONFIG_ACPI
/* ACPI HID for the ARM CoreSight Debug device */
static const struct acpi_device_id debug_platform_ids[] = {
	{"ARMHC503", 0, 0, 0}, /* ARM CoreSight Debug */
	{},
};
MODULE_DEVICE_TABLE(acpi, debug_platform_ids);
#endif

#ifdef CONFIG_PM
/* Runtime PM: gate the APB clock while the device is suspended */
static int debug_runtime_suspend(struct device *dev)
{
	struct debug_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
		clk_disable_unprepare(drvdata->pclk);
	return 0;
}

static int debug_runtime_resume(struct device *dev)
{
	struct debug_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
		clk_prepare_enable(drvdata->pclk);
	return 0;
}
#endif

static const struct dev_pm_ops debug_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(debug_runtime_suspend, debug_runtime_resume, NULL)
};

static struct platform_driver debug_platform_driver = {
	.probe	= debug_platform_probe,
	.remove_new = debug_platform_remove,
	.driver	= {
		.name			= "coresight-debug-platform",
		.acpi_match_table	= ACPI_PTR(debug_platform_ids),
		.suppress_bind_attrs	= true,
		.pm			= &debug_dev_pm_ops,
	},
};
774 
/* Register both the AMBA and platform drivers through the shared helper */
static int __init debug_init(void)
{
	return coresight_init_driver("debug", &debug_driver, &debug_platform_driver);
}

static void __exit debug_exit(void)
{
	coresight_remove_driver(&debug_driver, &debug_platform_driver);
}
module_init(debug_init);
module_exit(debug_exit);
786 
787 MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
788 MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
789 MODULE_LICENSE("GPL");
790