xref: /linux/arch/powerpc/platforms/powernv/opal-imc.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * OPAL IMC interface detection driver
4  * Supported on POWERNV platform
5  *
6  * Copyright	(C) 2017 Madhavan Srinivasan, IBM Corporation.
7  *		(C) 2017 Anju T Sudhakar, IBM Corporation.
8  *		(C) 2017 Hemant K Shaw, IBM Corporation.
9  */
10 #include <linux/kernel.h>
11 #include <linux/platform_device.h>
12 #include <linux/of.h>
13 #include <linux/of_address.h>
14 #include <linux/crash_dump.h>
15 #include <linux/debugfs.h>
16 #include <asm/opal.h>
17 #include <asm/io.h>
18 #include <asm/imc-pmu.h>
19 #include <asm/cputhreads.h>
20 
21 static struct dentry *imc_debugfs_parent;
22 
23 /* Helpers to export imc command and mode via debugfs */
imc_mem_get(void * data,u64 * val)24 static int imc_mem_get(void *data, u64 *val)
25 {
26 	*val = cpu_to_be64(*(u64 *)data);
27 	return 0;
28 }
29 
imc_mem_set(void * data,u64 val)30 static int imc_mem_set(void *data, u64 val)
31 {
32 	*(u64 *)data = cpu_to_be64(val);
33 	return 0;
34 }
35 DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");
36 
/*
 * Create a debugfs file exposing one 64-bit IMC control-block word.
 * The _unsafe variant is correct here because fops_imc_x64 was defined
 * with DEFINE_DEBUGFS_ATTRIBUTE, which handles file lifetime itself.
 */
static void imc_debugfs_create_x64(const char *name, umode_t mode,
				   struct dentry *parent, u64  *value)
{
	debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64);
}
42 
43 /*
44  * export_imc_mode_and_cmd: Create a debugfs interface
45  *                     for imc_cmd and imc_mode
46  *                     for each node in the system.
47  *  imc_mode and imc_cmd can be changed by echo into
48  *  this interface.
49  */
export_imc_mode_and_cmd(struct device_node * node,struct imc_pmu * pmu_ptr)50 static void export_imc_mode_and_cmd(struct device_node *node,
51 				    struct imc_pmu *pmu_ptr)
52 {
53 	static u64 loc, *imc_mode_addr, *imc_cmd_addr;
54 	char mode[16], cmd[16];
55 	u32 cb_offset;
56 	struct imc_mem_info *ptr = pmu_ptr->mem_info;
57 
58 	imc_debugfs_parent = debugfs_create_dir("imc", arch_debugfs_dir);
59 
60 	if (of_property_read_u32(node, "cb_offset", &cb_offset))
61 		cb_offset = IMC_CNTL_BLK_OFFSET;
62 
63 	while (ptr->vbase != NULL) {
64 		loc = (u64)(ptr->vbase) + cb_offset;
65 		imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
66 		sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
67 		imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
68 				       imc_mode_addr);
69 
70 		imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
71 		sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
72 		imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
73 				       imc_cmd_addr);
74 		ptr++;
75 	}
76 }
77 
78 /*
79  * imc_get_mem_addr_nest: Function to get nest counter memory region
80  * for each chip
81  */
imc_get_mem_addr_nest(struct device_node * node,struct imc_pmu * pmu_ptr,u32 offset)82 static int imc_get_mem_addr_nest(struct device_node *node,
83 				 struct imc_pmu *pmu_ptr,
84 				 u32 offset)
85 {
86 	int nr_chips = 0, i;
87 	u64 *base_addr_arr, baddr;
88 	u32 *chipid_arr;
89 
90 	nr_chips = of_property_count_u32_elems(node, "chip-id");
91 	if (nr_chips <= 0)
92 		return -ENODEV;
93 
94 	base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL);
95 	if (!base_addr_arr)
96 		return -ENOMEM;
97 
98 	chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL);
99 	if (!chipid_arr) {
100 		kfree(base_addr_arr);
101 		return -ENOMEM;
102 	}
103 
104 	if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
105 		goto error;
106 
107 	if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
108 								nr_chips))
109 		goto error;
110 
111 	pmu_ptr->mem_info = kzalloc_objs(*pmu_ptr->mem_info, nr_chips + 1);
112 	if (!pmu_ptr->mem_info)
113 		goto error;
114 
115 	for (i = 0; i < nr_chips; i++) {
116 		pmu_ptr->mem_info[i].id = chipid_arr[i];
117 		baddr = base_addr_arr[i] + offset;
118 		pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr);
119 	}
120 
121 	pmu_ptr->imc_counter_mmaped = true;
122 	kfree(base_addr_arr);
123 	kfree(chipid_arr);
124 	return 0;
125 
126 error:
127 	kfree(base_addr_arr);
128 	kfree(chipid_arr);
129 	return -1;
130 }
131 
132 /*
133  * imc_pmu_create : Takes the parent device which is the pmu unit, pmu_index
134  *		    and domain as the inputs.
135  * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
136  */
imc_pmu_create(struct device_node * parent,int pmu_index,int domain)137 static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
138 {
139 	int ret = 0;
140 	struct imc_pmu *pmu_ptr;
141 	u32 offset;
142 
143 	/* Return for unknown domain */
144 	if (domain < 0)
145 		return NULL;
146 
147 	/* memory for pmu */
148 	pmu_ptr = kzalloc_obj(*pmu_ptr);
149 	if (!pmu_ptr)
150 		return NULL;
151 
152 	/* Set the domain */
153 	pmu_ptr->domain = domain;
154 
155 	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
156 	if (ret)
157 		goto free_pmu;
158 
159 	if (!of_property_read_u32(parent, "offset", &offset)) {
160 		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
161 			goto free_pmu;
162 	}
163 
164 	/* Function to register IMC pmu */
165 	ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
166 	if (ret) {
167 		pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
168 		kfree(pmu_ptr->pmu.name);
169 		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
170 			kfree(pmu_ptr->mem_info);
171 		kfree(pmu_ptr);
172 		return NULL;
173 	}
174 
175 	return pmu_ptr;
176 
177 free_pmu:
178 	kfree(pmu_ptr);
179 	return NULL;
180 }
181 
disable_nest_pmu_counters(void)182 static void disable_nest_pmu_counters(void)
183 {
184 	int nid, cpu;
185 	const struct cpumask *l_cpumask;
186 
187 	cpus_read_lock();
188 	for_each_node_with_cpus(nid) {
189 		l_cpumask = cpumask_of_node(nid);
190 		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
191 		if (cpu >= nr_cpu_ids)
192 			continue;
193 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
194 				       get_hard_smp_processor_id(cpu));
195 	}
196 	cpus_read_unlock();
197 }
198 
disable_core_pmu_counters(void)199 static void disable_core_pmu_counters(void)
200 {
201 	int cpu, rc;
202 
203 	cpus_read_lock();
204 	/* Disable the IMC Core functions */
205 	for_each_online_cpu(cpu) {
206 		if (cpu_first_thread_sibling(cpu) != cpu)
207 			continue;
208 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
209 					    get_hard_smp_processor_id(cpu));
210 		if (rc)
211 			pr_err("%s: Failed to stop Core (cpu = %d)\n",
212 				__func__, cpu);
213 	}
214 	cpus_read_unlock();
215 }
216 
get_max_nest_dev(void)217 int get_max_nest_dev(void)
218 {
219 	struct device_node *node;
220 	u32 pmu_units = 0, type;
221 
222 	for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
223 		if (of_property_read_u32(node, "type", &type))
224 			continue;
225 
226 		if (type == IMC_TYPE_CHIP)
227 			pmu_units++;
228 	}
229 
230 	return pmu_units;
231 }
232 
opal_imc_counters_probe(struct platform_device * pdev)233 static int opal_imc_counters_probe(struct platform_device *pdev)
234 {
235 	struct device_node *imc_dev = pdev->dev.of_node;
236 	struct imc_pmu *pmu;
237 	int pmu_count = 0, domain;
238 	bool core_imc_reg = false, thread_imc_reg = false;
239 	u32 type;
240 
241 	/*
242 	 * Check whether this is kdump kernel. If yes, force the engines to
243 	 * stop and return.
244 	 */
245 	if (is_kdump_kernel()) {
246 		disable_nest_pmu_counters();
247 		disable_core_pmu_counters();
248 		return -ENODEV;
249 	}
250 
251 	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
252 		pmu = NULL;
253 		if (of_property_read_u32(imc_dev, "type", &type)) {
254 			pr_warn("IMC Device without type property\n");
255 			continue;
256 		}
257 
258 		switch (type) {
259 		case IMC_TYPE_CHIP:
260 			domain = IMC_DOMAIN_NEST;
261 			break;
262 		case IMC_TYPE_CORE:
263 			domain =IMC_DOMAIN_CORE;
264 			break;
265 		case IMC_TYPE_THREAD:
266 			domain = IMC_DOMAIN_THREAD;
267 			break;
268 		case IMC_TYPE_TRACE:
269 			domain = IMC_DOMAIN_TRACE;
270 			break;
271 		default:
272 			pr_warn("IMC Unknown Device type \n");
273 			domain = -1;
274 			break;
275 		}
276 
277 		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
278 		if (pmu != NULL) {
279 			if (domain == IMC_DOMAIN_NEST) {
280 				if (!imc_debugfs_parent)
281 					export_imc_mode_and_cmd(imc_dev, pmu);
282 				pmu_count++;
283 			}
284 			if (domain == IMC_DOMAIN_CORE)
285 				core_imc_reg = true;
286 			if (domain == IMC_DOMAIN_THREAD)
287 				thread_imc_reg = true;
288 		}
289 	}
290 
291 	/* If core imc is not registered, unregister thread-imc */
292 	if (!core_imc_reg && thread_imc_reg)
293 		unregister_thread_imc();
294 
295 	return 0;
296 }
297 
/* Platform-driver shutdown hook: quiesce the nest and core IMC engines. */
static void opal_imc_counters_shutdown(struct platform_device *pdev)
{
	/*
	 * Function only stops the engines which is bare minimum.
	 * TODO: Need to handle proper memory cleanup and pmu
	 * unregister.
	 */
	disable_nest_pmu_counters();
	disable_core_pmu_counters();
}
308 
/* Device-tree match for the top-level OPAL IMC node. */
static const struct of_device_id opal_imc_match[] = {
	{ .compatible = IMC_DTB_COMPAT },
	{},
};

static struct platform_driver opal_imc_driver = {
	.driver = {
		.name = "opal-imc-counters",
		.of_match_table = opal_imc_match,
	},
	.probe = opal_imc_counters_probe,
	.shutdown = opal_imc_counters_shutdown,
};

/* Registered as built-in only; there is no module unload path. */
builtin_platform_driver(opal_imc_driver);
324