/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>
#include <sys/mutex.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include "hwpmc_soft.h"

/*
 * Software PMC support.
 */

#define SOFT_CAPS (PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
    PMC_CAP_USER | PMC_CAP_SYSTEM)

struct soft_descr {
        struct pmc_descr pm_descr;      /* "base class" */
};

static struct soft_descr soft_pmcdesc[SOFT_NPMCS] =
{
#define SOFT_PMCDESCR(N)                                \
        {                                               \
                .pm_descr =                             \
                {                                       \
                        .pd_name = #N,                  \
                        .pd_class = PMC_CLASS_SOFT,     \
                        .pd_caps = SOFT_CAPS,           \
                        .pd_width = 64                  \
                },                                      \
        }

        SOFT_PMCDESCR(SOFT0),
        SOFT_PMCDESCR(SOFT1),
        SOFT_PMCDESCR(SOFT2),
        SOFT_PMCDESCR(SOFT3),
        SOFT_PMCDESCR(SOFT4),
        SOFT_PMCDESCR(SOFT5),
        SOFT_PMCDESCR(SOFT6),
        SOFT_PMCDESCR(SOFT7),
        SOFT_PMCDESCR(SOFT8),
        SOFT_PMCDESCR(SOFT9),
        SOFT_PMCDESCR(SOFT10),
        SOFT_PMCDESCR(SOFT11),
        SOFT_PMCDESCR(SOFT12),
        SOFT_PMCDESCR(SOFT13),
        SOFT_PMCDESCR(SOFT14),
        SOFT_PMCDESCR(SOFT15)
};

/*
 * Per-CPU data structure.
 */

struct soft_cpu {
        struct pmc_hw   soft_hw[SOFT_NPMCS];
        pmc_value_t     soft_values[SOFT_NPMCS];
};

static struct soft_cpu **soft_pcpu;

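/*
 * Allocate a software PMC: the request must be for the SOFT class, ask
 * only for capabilities in SOFT_CAPS, and name a currently registered
 * software event.  If the event provider supplied an allocation hook,
 * invoke it.
 */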
static int
soft_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
        enum pmc_event ev;
        struct pmc_soft *ps;

        (void) cpu;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        if (a->pm_class != PMC_CLASS_SOFT)
                return (EINVAL);

        if ((pm->pm_caps & SOFT_CAPS) == 0)
                return (EINVAL);

        if ((pm->pm_caps & ~SOFT_CAPS) != 0)
                return (EPERM);

        ev = pm->pm_event;
        if ((int)ev < PMC_EV_SOFT_FIRST || (int)ev > PMC_EV_SOFT_LAST)
                return (EINVAL);

        /* Check if event is registered. */
        ps = pmc_soft_ev_acquire(ev);
        if (ps == NULL)
                return (EINVAL);
        pmc_soft_ev_release(ps);
        /* Module unload is protected by pmc SX lock. */
        if (ps->ps_alloc != NULL)
                ps->ps_alloc();

        return (0);
}

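/*
 * Configure (pm != NULL) or de-configure (pm == NULL) the PMC at row
 * index 'ri' on CPU 'cpu'.
 */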
static int
soft_config_pmc(int cpu, int ri, struct pmc *pm)
{
        struct pmc_hw *phw;

        PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        phw = &soft_pcpu[cpu]->soft_hw[ri];

        KASSERT(pm == NULL || phw->phw_pmc == NULL,
            ("[soft,%d] pm=%p phw->pm=%p hwpmc not unconfigured", __LINE__,
            pm, phw->phw_pmc));

        phw->phw_pmc = pm;

        return (0);
}

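/*
 * Describe the software PMC at row index 'ri': its name, class and, if
 * enabled, the PMC currently attached to it.
 */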
static int
soft_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
        const struct soft_descr *pd;
        struct pmc_hw *phw;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        phw = &soft_pcpu[cpu]->soft_hw[ri];
        pd = &soft_pmcdesc[ri];

        strlcpy(pi->pm_name, pd->pm_descr.pd_name, sizeof(pi->pm_name));
        pi->pm_class = pd->pm_descr.pd_class;

        if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
                pi->pm_enabled = TRUE;
                *ppmc = phw->phw_pmc;
        } else {
                pi->pm_enabled = FALSE;
                *ppmc = NULL;
        }

        return (0);
}

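/* Return the PMC configured at row index 'ri' on CPU 'cpu', if any. */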
static int
soft_get_config(int cpu, int ri, struct pmc **ppm)
{
        (void) ri;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        *ppm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
        return (0);
}

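/*
 * Free the per-CPU state allocated by soft_pcpu_init() for CPU 'cpu'.
 */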
static int
soft_pcpu_fini(struct pmc_mdep *md, int cpu)
{
        int ri;
        struct pmc_cpu *pc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal cpu %d", __LINE__, cpu));
        KASSERT(soft_pcpu[cpu] != NULL, ("[soft,%d] null pcpu", __LINE__));

        free(soft_pcpu[cpu], M_PMC);
        soft_pcpu[cpu] = NULL;

        ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;

        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] ri=%d", __LINE__, ri));

        pc = pmc_pcpu[cpu];
        pc->pc_hwpmcs[ri] = NULL;

        return (0);
}

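/*
 * Allocate the per-CPU state for CPU 'cpu' and hook each software PMC
 * row into the per-CPU pmc structure, starting at this class' first row
 * index.
 */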
static int
soft_pcpu_init(struct pmc_mdep *md, int cpu)
{
        int first_ri, n;
        struct pmc_cpu *pc;
        struct soft_cpu *soft_pc;
        struct pmc_hw *phw;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal cpu %d", __LINE__, cpu));
        KASSERT(soft_pcpu, ("[soft,%d] null pcpu", __LINE__));
        KASSERT(soft_pcpu[cpu] == NULL, ("[soft,%d] non-null per-cpu",
            __LINE__));

        soft_pc = malloc(sizeof(struct soft_cpu), M_PMC, M_WAITOK|M_ZERO);
        pc = pmc_pcpu[cpu];

        KASSERT(pc != NULL, ("[soft,%d] cpu %d null per-cpu", __LINE__, cpu));

        soft_pcpu[cpu] = soft_pc;
        phw = soft_pc->soft_hw;
        first_ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;

        for (n = 0; n < SOFT_NPMCS; n++, phw++) {
                phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
                    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
                phw->phw_pmc = NULL;
                pc->pc_hwpmcs[n + first_ri] = phw;
        }

        return (0);
}

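/* Read the current value of the software counter at row index 'ri'. */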
static int
soft_read_pmc(int cpu, int ri, struct pmc *pm __unused, pmc_value_t *v)
{

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        PMCDBG1(MDP,REA,1,"soft-read id=%d", ri);

        *v = soft_pcpu[cpu]->soft_values[ri];

        return (0);
}

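/* Set the software counter at row index 'ri' to 'v'. */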
static int
soft_write_pmc(int cpu, int ri, struct pmc *pm __unused, pmc_value_t v)
{
        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal cpu value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        PMCDBG3(MDP,WRI,1, "soft-write cpu=%d ri=%d v=%jx", cpu, ri, v);

        soft_pcpu[cpu]->soft_values[ri] = v;

        return (0);
}

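/*
 * Release a software PMC.  The row must already be de-configured; if the
 * event provider supplied a release hook, invoke it.
 */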
static int
soft_release_pmc(int cpu, int ri, struct pmc *pmc)
{
        struct pmc_hw *phw __diagused;
        enum pmc_event ev;
        struct pmc_soft *ps;

        (void) pmc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        phw = &soft_pcpu[cpu]->soft_hw[ri];

        KASSERT(phw->phw_pmc == NULL,
            ("[soft,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

        ev = pmc->pm_event;

        /* Check if event is registered. */
        ps = pmc_soft_ev_acquire(ev);
        KASSERT(ps != NULL,
            ("[soft,%d] unregistered event %d", __LINE__, ev));
        pmc_soft_ev_release(ps);
        /* Module unload is protected by pmc SX lock. */
        if (ps->ps_release != NULL)
                ps->ps_release();
        return (0);
}

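/*
 * Start a software PMC by incrementing the event's 'ps_running' count,
 * signalling the event source that at least one PMC is consuming it.
 */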
static int
soft_start_pmc(int cpu, int ri, struct pmc *pm)
{
        struct pmc_soft *ps;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        ps = pmc_soft_ev_acquire(pm->pm_event);
        if (ps == NULL)
                return (EINVAL);
        atomic_add_int(&ps->ps_running, 1);
        pmc_soft_ev_release(ps);

        return (0);
}

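/*
 * Stop a software PMC by decrementing the event's 'ps_running' count;
 * if the event has been unregistered there is nothing to undo.
 */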
static int
soft_stop_pmc(int cpu, int ri, struct pmc *pm)
{
        struct pmc_soft *ps;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < SOFT_NPMCS,
            ("[soft,%d] illegal row-index %d", __LINE__, ri));

        ps = pmc_soft_ev_acquire(pm->pm_event);
        /* Nothing to do if the event has been unregistered. */
        if (ps != NULL) {
                atomic_subtract_int(&ps->ps_running, 1);
                pmc_soft_ev_release(ps);
        }

        return (0);
}

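/*
 * Handle a software event posted on CPU 'ks->pm_cpu'.  Every running PMC
 * attached to the event is updated: counting-mode PMCs simply increment
 * their per-CPU value, while sampling-mode PMCs count down from the
 * reload count and post a PMC interrupt when it is exhausted.  Returns
 * non-zero if at least one PMC consumed the event.
 */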
int
pmc_soft_intr(struct pmckern_soft *ks)
{
        struct pmc *pm;
        struct soft_cpu *pc;
        int ri, processed, error, user_mode;

        KASSERT(ks->pm_cpu >= 0 && ks->pm_cpu < pmc_cpu_max(),
            ("[soft,%d] CPU %d out of range", __LINE__, ks->pm_cpu));

        processed = 0;
        pc = soft_pcpu[ks->pm_cpu];

        for (ri = 0; ri < SOFT_NPMCS; ri++) {
                pm = pc->soft_hw[ri].phw_pmc;
                if (pm == NULL ||
                    pm->pm_state != PMC_STATE_RUNNING ||
                    pm->pm_event != ks->pm_ev) {
                        continue;
                }

                processed = 1;
                if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
                        if ((pc->soft_values[ri]--) <= 0)
                                pc->soft_values[ri] += pm->pm_sc.pm_reloadcount;
                        else
                                continue;
                        user_mode = TRAPF_USERMODE(ks->pm_tf);
                        error = pmc_process_interrupt(PMC_SR, pm, ks->pm_tf);
                        if (error) {
                                soft_stop_pmc(ks->pm_cpu, ri, pm);
                                continue;
                        }

                        if (user_mode) {
                                /*
                                 * If in user mode, schedule an AST to
                                 * process the callchain out of interrupt
                                 * context.
                                 */
                                ast_sched(curthread, TDA_HWPMC);
                        }
                } else
                        pc->soft_values[ri]++;
        }
        if (processed)
                counter_u64_add(pmc_stats.pm_intr_processed, 1);
        else
                counter_u64_add(pmc_stats.pm_intr_ignored, 1);

        return (processed);
}

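/*
 * AST handler: capture the user-mode callchain for a sampled software
 * event outside of interrupt context.
 */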
static void
ast_hwpmc(struct thread *td, int tda __unused)
{
        /* Handle Software PMC callchain capture. */
        if (PMC_IS_PENDING_CALLCHAIN(td))
                PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT,
                    (void *)td->td_frame);
}

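/*
 * Register the software PMC class with the MD layer and install the AST
 * handler used for user-mode callchain capture.
 */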
void
pmc_soft_initialize(struct pmc_mdep *md)
{
        struct pmc_classdep *pcd;

        /* Add SOFT PMCs. */
        soft_pcpu = malloc(sizeof(struct soft_cpu *) * pmc_cpu_max(), M_PMC,
            M_ZERO|M_WAITOK);

        pcd = &md->pmd_classdep[PMC_CLASS_INDEX_SOFT];

        pcd->pcd_caps = SOFT_CAPS;
        pcd->pcd_class = PMC_CLASS_SOFT;
        pcd->pcd_num = SOFT_NPMCS;
        pcd->pcd_ri = md->pmd_npmc;
        pcd->pcd_width = 64;

        pcd->pcd_allocate_pmc = soft_allocate_pmc;
        pcd->pcd_config_pmc = soft_config_pmc;
        pcd->pcd_describe = soft_describe;
        pcd->pcd_get_config = soft_get_config;
        pcd->pcd_get_msr = NULL;
        pcd->pcd_pcpu_init = soft_pcpu_init;
        pcd->pcd_pcpu_fini = soft_pcpu_fini;
        pcd->pcd_read_pmc = soft_read_pmc;
        pcd->pcd_write_pmc = soft_write_pmc;
        pcd->pcd_release_pmc = soft_release_pmc;
        pcd->pcd_start_pmc = soft_start_pmc;
        pcd->pcd_stop_pmc = soft_stop_pmc;

        md->pmd_npmc += SOFT_NPMCS;

        ast_register(TDA_HWPMC, ASTR_UNCOND, 0, ast_hwpmc);
}

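/* Undo pmc_soft_initialize(): all per-CPU state must already be freed. */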
void
pmc_soft_finalize(struct pmc_mdep *md)
{
        PMCDBG0(MDP, INI, 1, "soft-finalize");

        for (int i = 0; i < pmc_cpu_max(); i++)
                KASSERT(soft_pcpu[i] == NULL, ("[soft,%d] non-null pcpu cpu %d",
                    __LINE__, i));

        ast_deregister(TDA_HWPMC);
        free(soft_pcpu, M_PMC);
        soft_pcpu = NULL;
}