/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * SPE is enabled and configured on a per-core basis, with each core requiring
 * its own buffer and configuration:
 *
 * - HWT allocates a large single buffer per core. This buffer is split in half
 *   to create a 2 element circular buffer (aka ping-pong buffer) where the
 *   hardware fills one half while the other is copied out to userspace.
 * - SMP calls are used to enable and configure each core, with SPE initially
 *   set up to write into the first half of the buffer.
 * - When the first half of the buffer is full, a buffer full interrupt will
 *   fire. The handler switches SPE over to the other half and notifies
 *   userspace via kqueue that a buffer is ready to be copied out.
 * - The kernel responds to HWT_IOC_BUFPTR_GET ioctl by sending details of the
 *   buffer that needs servicing.
 * - The buffers pending copying will not be overwritten until an
 *   acknowledgement from userspace confirms their data has been copied out.
 * - In the case where both halves of the buffer are full, profiling will be
 *   paused until userspace services the backlog.
 * - Using large buffer sizes should minimise pauses and loss of profiling
 *   data while userspace catches up.
 * - kqueue can only notify and queue one kevent of the same type, with
 *   further buffer records held on an internal pending queue and handed
 *   to userspace one at a time.
 */
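
/*
 * Editor's sketch (not part of the driver): the ping-pong addressing
 * described above, assuming a single per-core buffer of bufsize bytes
 * whose two halves are filled alternately. The helper name is invented
 * for illustration.
 */
static inline uint64_t
spe_half_base(uint64_t kvaddr, uint64_t bufsize, int buf_idx)
{
        /* buf_idx is 0 or 1; each half is bufsize / 2 bytes long */
        return (kvaddr + (uint64_t)buf_idx * (bufsize / 2));
}
/* When a half fills, the driver flips to the other: buf_idx ^= 1. */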

/* spe_backend_init_cpu() */
        sc->spe_info = spe_info;

        CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) {
                info->sc = sc;
                info->ident = cpu_id;
                info->buf_info[0].info = info;
                info->buf_info[0].buf_idx = 0;
                info->buf_info[1].info = info;
                info->buf_info[1].buf_idx = 1;

                mtx_init(&info->lock, lock_name, NULL, MTX_SPIN);

/* spe_backend_init() */
        if (ctx->bufsize % sc->kva_align != 0)

        /*
         * Since we're splitting the buffer in half + PMBLIMITR needs to be
         * page aligned, the smallest workable buffer is two pages.
         */
        if (ctx->bufsize < (2 * PAGE_SIZE))

        sc->ctx = ctx;
        sc->kqueue_fd = ctx->kqueue_fd;
        sc->hwt_td = ctx->hwt_td;

        if (ctx->mode == HWT_MODE_THREAD)
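
/*
 * Worked example of the two checks above: with 4 KiB pages the minimum
 * bufsize is 8192 bytes, giving two page-aligned 4 KiB halves, and any
 * larger size must still be a multiple of sc->kva_align.
 */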

/* hex_dump() */
        printf("--------------------------------------------------------------\n");

/* spe_backend_deinit() */
        CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) {
                hex_dump((void *)info->kvaddr, 128);
                hex_dump((void *)(info->kvaddr + (info->buf_size / 2)), 128);

        if (ctx->state == CTX_STATE_RUNNING) {
                ctx->state = CTX_STATE_STOPPED;

/* arm_spe_min_interval() */
        switch (PMSIDR_Interval_VAL(sc->pmsidr))
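
/*
 * The switch body is elided in this excerpt. A sketch of the decode it
 * performs, based on the PMSIDR_EL1.Interval encodings (bits [11:8])
 * defined by the Arm architecture; the function and macro spellings
 * below are assumptions, not the driver's own.
 */
static u_int
arm_spe_min_interval_sketch(uint64_t pmsidr)
{
        switch ((pmsidr >> 8) & 0xf) {
        case 0x0: return (256);
        case 0x2: return (512);
        case 0x3: return (768);
        case 0x4: return (1024);
        case 0x5: return (1536);
        case 0x6: return (2048);
        case 0x7: return (3072);
        case 0x8: return (4096);
        default:  return (4096);        /* reserved encoding; be conservative */
        }
}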

/* arm_spe_set_interval() */
        uint64_t min_interval = arm_spe_min_interval(info->sc);

        info->pmsirr &= ~(PMSIRR_INTERVAL_MASK);
        info->pmsirr |= (interval << PMSIRR_INTERVAL_SHIFT);
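
/*
 * The lines elided between fetching min_interval and the read-modify-
 * write above presumably clamp the caller's request, e.g.:
 *
 *      if (interval < min_interval)
 *              interval = min_interval;
 */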

/* spe_backend_configure() */
        mtx_lock_spin(&info->lock);
        info->ident = cpu_id;

        info->pmsfcr = 0;
        info->pmsevfr = 0xFFFFFFFFFFFFFFFFUL;
        info->pmslatfr = 0;
        info->pmsirr =
            (arm_spe_min_interval(info->sc) << PMSIRR_INTERVAL_SHIFT)
        info->pmsicr = 0;
        info->pmscr = PMSCR_TS | PMSCR_PA | PMSCR_CX | PMSCR_E1SPE | PMSCR_E0SPE;

        if (ctx->config != NULL &&
            ctx->config_size == sizeof(struct arm_spe_config) &&
            ctx->config_version == 1) {
                cfg = (struct arm_spe_config *)ctx->config;
                if (cfg->interval)
                        arm_spe_set_interval(info, cfg->interval);
                if (cfg->level == ARM_SPE_KERNEL_ONLY)
                        info->pmscr &= ~(PMSCR_E0SPE); /* turn off user */
                if (cfg->level == ARM_SPE_USER_ONLY)
                        info->pmscr &= ~(PMSCR_E1SPE); /* turn off kern */
                if (cfg->ctx_field)
                        info->ctx_field = cfg->ctx_field;

        mtx_unlock_spin(&info->lock);
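
/*
 * Editor's illustration: a version-1 configuration as userspace might
 * fill it in, matching the checks above. Field names come from this
 * function; the ioctl plumbing that delivers ctx->config is outside
 * this excerpt.
 */
struct arm_spe_config cfg_example = {
        .interval = 4096,               /* desired sampling interval */
        .level = ARM_SPE_KERNEL_ONLY,   /* clears PMSCR_E0SPE above */
        .ctx_field = ARM_SPE_CTX_PID,   /* request PID context tracking */
};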

/* arm_spe_enable() */
        mtx_lock_spin(&info->lock);

        if (info->ctx_field == ARM_SPE_CTX_CPU_ID)

        WRITE_SPECIALREG(PMSFCR_EL1_REG, info->pmsfcr);
        WRITE_SPECIALREG(PMSEVFR_EL1_REG, info->pmsevfr);
        WRITE_SPECIALREG(PMSLATFR_EL1_REG, info->pmslatfr);

        /* Set the sampling interval */
        WRITE_SPECIALREG(PMSIRR_EL1_REG, info->pmsirr);

        /* Write 0 here before enabling sampling */
        WRITE_SPECIALREG(PMSICR_EL1_REG, info->pmsicr);

        base = info->kvaddr;
        limit = base + (info->buf_size / 2);

        /* Enable sampling */
        WRITE_SPECIALREG(PMSCR_EL1_REG, info->pmscr);

        info->enabled = true;

        mtx_unlock_spin(&info->lock);
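
/*
 * The excerpt elides the buffer-register writes between computing
 * base/limit and enabling sampling. Architecturally they program
 * PMBPTR_EL1 (next write pointer) and PMBLIMITR_EL1 (limit address
 * plus the buffer enable bit); a sketch, macro names assumed:
 *
 *      WRITE_SPECIALREG(PMBPTR_EL1_REG, base);
 *      WRITE_SPECIALREG(PMBLIMITR_EL1_REG, limit | PMBLIMITR_E);
 *      isb();
 */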

/* spe_backend_enable_smp() */
        CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) {
                vm = hwt_cpu_get(ctx, cpu_id)->vm;

                mtx_lock_spin(&info->lock);
                info->kvaddr = vm->kvaddr;
                info->buf_size = ctx->bufsize;
                mtx_unlock_spin(&info->lock);

        cpu_id = CPU_FFS(&ctx->cpu_map) - 1;

        if (info->ctx_field == ARM_SPE_CTX_PID)

        smp_rendezvous_cpus(ctx->cpu_map, smp_no_rendezvous_barrier,
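
/*
 * smp_rendezvous_cpus() runs an action handler on every CPU in the
 * given map; the truncated call above presumably names arm_spe_enable()
 * so that each core programs its own SPE registers, roughly:
 *
 *      smp_rendezvous_cpus(ctx->cpu_map, smp_no_rendezvous_barrier,
 *          arm_spe_enable, smp_no_rendezvous_barrier, spe_info);
 */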

/* arm_spe_disable() */
        struct arm_spe_buf_info *buf = &info->buf_info[info->buf_idx];

        if (!info->enabled)

        mtx_lock_spin(&info->lock);
        buf->pmbptr = READ_SPECIALREG(PMBPTR_EL1_REG);
        info->enabled = false;
        mtx_unlock_spin(&info->lock);
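
/*
 * Editor's sketch: before PMBPTR_EL1 can be read back reliably, the
 * elided lines must stop sampling and drain outstanding sample writes.
 * The architectural sequence, with helper spellings assumed:
 */
        WRITE_SPECIALREG(PMSCR_EL1_REG, 0);     /* stop sampling */
        isb();
        __asm __volatile("psb csync");          /* drain buffered samples */
        dsb(sy);
        WRITE_SPECIALREG(PMBLIMITR_EL1_REG, 0); /* clear PMBLIMITR_EL1.E */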

/* spe_backend_disable_smp() */
        smp_rendezvous_cpus(ctx->cpu_map, smp_no_rendezvous_barrier,

        CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) {
                buf = &info->buf_info[info->buf_idx];

        /* ... offsets for all bufs - let userspace know it can shut down */
        ret = kqfd_register(ctx->kqueue_fd, &kev, ctx->hwt_td, M_WAITOK);
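
/*
 * The kevent setup is elided above; a hypothetical EV_SET() for such a
 * "final buffer offsets recorded, safe to shut down" notification:
 *
 *      EV_SET(&kev, ident, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 */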

/* arm_spe_reenable() */
        WRITE_SPECIALREG(PMSCR_EL1_REG, info->pmscr);

/* spe_backend_svc_buf() */
        if (s->buf_idx > 1)

        if (s->ident >= mp_ncpus)

        info = &spe_info[s->ident];
        mtx_lock_spin(&info->lock);

        buf = &info->buf_info[s->buf_idx];

        if (!info->enabled) {

        buf->buf_svc = false;

        /* Re-enable profiling if we've been waiting for this notification */
        if (buf->buf_wait) {
                CPU_SETOF(s->ident, &cpu_set);

                mtx_unlock_spin(&info->lock);

                mtx_lock_spin(&info->lock);

                buf->buf_wait = false;

        mtx_unlock_spin(&info->lock);
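
/*
 * Between the unlock/lock pair above, the driver presumably kicks the
 * stalled core with a single-CPU rendezvous so arm_spe_reenable() runs
 * there (the exact call is elided from this excerpt), e.g.:
 *
 *      smp_rendezvous_cpus(cpu_set, smp_no_rendezvous_barrier,
 *          arm_spe_reenable, smp_no_rendezvous_barrier, NULL);
 */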

/* spe_backend_read() */
        mtx_lock_spin(&sc->sc_lock);

        q = STAILQ_FIRST(&sc->pending);

        *ident = q->ident;
        *offset = q->offset;
        *data = (q->buf_idx << KQ_BUF_POS_SHIFT) |
            (q->partial_rec << KQ_PARTREC_SHIFT) |
            (q->final_buf << KQ_FINAL_BUF_SHIFT);

        STAILQ_REMOVE_HEAD(&sc->pending, next);
        sc->npending--;

        mtx_unlock_spin(&sc->sc_lock);
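
/*
 * Editor's illustration: userspace unpacks the kevent data word built
 * above with the matching shifts (single-bit fields assumed for the
 * flags):
 *
 *      buf_idx = (data >> KQ_BUF_POS_SHIFT) & 1;
 *      partial_rec = (data >> KQ_PARTREC_SHIFT) & 1;
 *      final_buf = (data >> KQ_FINAL_BUF_SHIFT) & 1;
 */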