/*
 *  arch/powerpc/kernel/pmc.c
 *
 *  Copyright (C) 2004 David Gibson, IBM Corporation.
 *  Includes code formerly from arch/ppc/kernel/perfmon.c:
 *    Author: Andy Fleming
 *    Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/pmc.h>

#ifndef MMCR0_PMAO
#define MMCR0_PMAO	0
#endif

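/*
 * Default performance monitor interrupt handler, installed while nobody
 * has the PMC hardware reserved.  All it does is mask further PMC
 * interrupts: PMGC0[PMIE] is cleared on Freescale embedded perfmon,
 * while IBM-style classic/server PMCs get MMCR0[PMXE] (and, where it
 * exists, MMCR0[PMAO]) cleared.
 */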
static void dummy_perf(struct pt_regs *regs)
{
#if defined(CONFIG_FSL_EMB_PERFMON)
	mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
	if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
#else
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
#endif
}

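/*
 * pmc_owner_lock protects the reservation state below.  perf_irq is the
 * hook called from the performance monitor exception; it points at
 * dummy_perf until someone reserves the PMC hardware and installs a
 * real handler.
 */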
static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;

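/*
 * Reserve the PMC hardware for exclusive use and install new_perf_irq
 * (or dummy_perf if it is NULL) as the performance monitor interrupt
 * handler.  Returns -EBUSY if another caller already holds the
 * reservation.
 */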
int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
	int err = 0;

	raw_spin_lock(&pmc_owner_lock);

	if (pmc_owner_caller) {
		printk(KERN_WARNING "reserve_pmc_hardware: "
		       "PMC hardware busy (reserved by caller %p)\n",
		       pmc_owner_caller);
		err = -EBUSY;
		goto out;
	}

	pmc_owner_caller = __builtin_return_address(0);
	perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;

 out:
	raw_spin_unlock(&pmc_owner_lock);
	return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);

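/*
 * Drop the reservation taken by reserve_pmc_hardware() and point the
 * performance monitor interrupt handler back at dummy_perf.
 */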
void release_pmc_hardware(void)
{
	raw_spin_lock(&pmc_owner_lock);

	WARN_ON(!pmc_owner_caller);

	pmc_owner_caller = NULL;
	perf_irq = dummy_perf;

	raw_spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);
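
/*
 * Typical usage is a reserve/release pair around the lifetime of a
 * performance monitoring subsystem.  The caller below is hypothetical
 * (my_pmc_handler, my_init and my_exit are not part of this file); it
 * is only a sketch of how the API is meant to be used:
 *
 *	static void my_pmc_handler(struct pt_regs *regs)
 *	{
 *		... read and reset the counters, re-arm the interrupt ...
 *	}
 *
 *	static int my_init(void)
 *	{
 *		return reserve_pmc_hardware(my_pmc_handler);
 *	}
 *
 *	static void my_exit(void)
 *	{
 *		release_pmc_hardware();
 *	}
 */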

#ifdef CONFIG_PPC64
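/*
 * power4_enable_pmcs() sets the HID0 bit (bit 20 in IBM big-endian
 * numbering, hence the 1UL << (63 - 20) below) that lets the
 * performance monitor counters run on POWER4-family CPUs.  As the
 * existing comment notes, updating HID0 on these CPUs has to follow
 * the mtspr / six mfspr / isync sequence in the asm block.
 */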
void power4_enable_pmcs(void)
{
	unsigned long hid0;

	hid0 = mfspr(SPRN_HID0);
	hid0 |= 1UL << (63 - 20);

	/* POWER4 requires the following sequence */
	asm volatile(
		"sync\n"
		"mtspr     %1, %0\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
		"memory");
}
#endif /* CONFIG_PPC64 */