/*
 *  arch/powerpc/kernel/pmc.c
 *
 *  Copyright (C) 2004 David Gibson, IBM Corporation.
 *  Includes code formerly from arch/ppc/kernel/perfmon.c:
 *    Author: Andy Fleming
 *    Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/pmc.h>

#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
/* Ensure exceptions are disabled (Freescale Book-E: clear PMGC0[PMIE]) */
static void dummy_perf(struct pt_regs *regs)
{
	unsigned int pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 &= ~PMGC0_PMIE;
	mtpmr(PMRN_PMGC0, pmgc0);
}
#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)

#ifndef MMCR0_PMAO
#define MMCR0_PMAO	0
#endif

/* Ensure exceptions are disabled */
static void dummy_perf(struct pt_regs *regs)
{
	unsigned int mmcr0 = mfspr(SPRN_MMCR0);

	mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
	mtspr(SPRN_MMCR0, mmcr0);
}
#else
/* Ensure exceptions are disabled */
static void dummy_perf(struct pt_regs *regs)
{
	unsigned int mmcr0 = mfspr(SPRN_MMCR0);

	mmcr0 &= ~(MMCR0_PMXE);
	mtspr(SPRN_MMCR0, mmcr0);
}
#endif

static DEFINE_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;	/* hook run on a performance monitor interrupt */

/*
 * Reserve exclusive use of the PMC hardware and install new_perf_irq
 * as the performance monitor interrupt handler (NULL keeps the dummy
 * handler).  Returns -EBUSY if another caller already owns the PMCs.
 */
int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
	int err = 0;

	spin_lock(&pmc_owner_lock);

	if (pmc_owner_caller) {
		printk(KERN_WARNING "reserve_pmc_hardware: "
		       "PMC hardware busy (reserved by caller %p)\n",
		       pmc_owner_caller);
		err = -EBUSY;
		goto out;
	}

	pmc_owner_caller = __builtin_return_address(0);
	perf_irq = new_perf_irq ? : dummy_perf;

 out:
	spin_unlock(&pmc_owner_lock);
	return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);

/*
 * Give up ownership of the PMC hardware and restore the dummy handler.
 * Warns if the hardware was not actually reserved.
 */
void release_pmc_hardware(void)
{
	spin_lock(&pmc_owner_lock);

	WARN_ON(!pmc_owner_caller);

	pmc_owner_caller = NULL;
	perf_irq = dummy_perf;

	spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);
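
/*
 * Usage sketch (illustration only, not part of this file; the function
 * names below are hypothetical): a profiling client reserves the PMC
 * hardware before programming the counters and releases it when done,
 * which restores dummy_perf:
 *
 *	static void my_pmi_handler(struct pt_regs *regs)
 *	{
 *		... service the counters and clear the interrupt ...
 *	}
 *
 *	static int my_start(void)
 *	{
 *		int err = reserve_pmc_hardware(my_pmi_handler);
 *
 *		if (err)
 *			return err;	(-EBUSY: PMCs already reserved)
 *		... program the MMCRs and PMCs ...
 *		return 0;
 *	}
 *
 *	static void my_stop(void)
 *	{
 *		release_pmc_hardware();
 *	}
 */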

#ifdef CONFIG_PPC64
void power4_enable_pmcs(void)
{
	unsigned long hid0;

	hid0 = mfspr(SPRN_HID0);
	hid0 |= 1UL << (63 - 20);	/* set HID0 bit 20 (IBM numbering): PMC enable */

	/* POWER4 requires the following sequence */
	asm volatile(
		"sync\n"
		"mtspr     %1, %0\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"mfspr     %0, %1\n"
		"isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0)
		: "memory");
}
#endif /* CONFIG_PPC64 */
117