Lines Matching +full:cpu +full:- +full:offset
4 * SPDX-License-Identifier: BSD-2-Clause
12 * - Since PT is configured on a per-core basis, the driver uses
14 * - PT-specific resources are stored in a 'struct pt_ctx' context structure for
15 * each traced CPU core or thread. Upon initialization, a ToPA configuration
19 * - The PT driver uses the XSAVE/XRSTOR PT extensions to load and save all
23 * - When tracing starts, the PT hardware will start writing data into the
26 * last valid tracing buffer offset and enqueue a HWT_RECORD_BUFFER record.
28 * - The userspace PT backend waits for incoming HWT_RECORD_BUFFER records
33 * - We currently configure the PT hardware to trigger an interrupt whenever
40 * - Support for more tracing options and PT features.
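The matches below reference a small set of bookkeeping structures. The sketch that follows reconstructs them from the fields visible in this listing (save_area, buf.topa_hw, buf.offset, in_pcint_handler, swi_cookie); it is an approximation rather than the authoritative definitions in the driver, and member order, exact types, and any unmatched members are assumptions.

/* Approximate shape of the driver's per-context and per-CPU state (sketch). */
struct pt_buffer {
        uint64_t        *topa_hw;       /* ToPA table: one entry per trace page plus an END entry */
        size_t          size;           /* trace buffer size in bytes (npages * PAGE_SIZE) */
        vm_offset_t     offset;         /* last published write offset, accessed with 64-bit atomics */
        struct mtx      lock;           /* "pttopa" spin mutex */
};

struct pt_ctx {
        int                     id;             /* CPU id (CPU mode) or thread id (thread mode) */
        struct pt_buffer        buf;
        uint8_t                 *save_area;     /* 64-byte aligned XSAVE area holding the PT state component */
        struct hwt_context      *hwt_ctx;       /* owning HWT context */
};

struct pt_cpu {
        struct pt_ctx   *ctx;                   /* active tracing context, NULL when idle */
        int             in_pcint_handler;       /* set while the ToPA PMI handler runs */
        void            *swi_cookie;            /* SWI used to defer HWT_RECORD_BUFFER enqueueing */
};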
101 * Intel Processor Trace XSAVE-managed state.
119 vm_offset_t offset; member
128 /* PT tracing contexts used for CPU mode. */
141 * PT-related CPUID bits.
172 return ((struct xstate_hdr *)(ctx->save_area + in pt_ctx_get_xstate_hdr()
180 return ((struct pt_ext_area *)(ctx->save_area + in pt_ctx_get_ext_area()
185 * Updates current trace buffer offset from the
192 uint64_t offset; in pt_update_buffer() local
194 /* Update buffer offset. */ in pt_update_buffer()
196 offset = ((reg & PT_TOPA_PAGE_MASK) >> PT_TOPA_PAGE_SHIFT) * PAGE_SIZE; in pt_update_buffer()
197 offset += (reg >> 32); in pt_update_buffer()
199 atomic_store_rel_64(&buf->offset, offset); in pt_update_buffer()
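The two lines above recover the write position from the IA32_RTIT_OUTPUT_MASK_PTRS image. Below is a standalone illustration of that decode; the PT_TOPA_PAGE_MASK/PT_TOPA_PAGE_SHIFT values are assumptions (their definitions are not part of this listing), and the arithmetic assumes every ToPA entry maps a single 4 KiB page, matching the table built in pt_topa_prepare() further down.

#include <stdint.h>
#include <stdio.h>

/* Assumed values; the driver's real definitions are not matched here. */
#define PT_TOPA_PAGE_MASK       0xffffff80ULL   /* ToPA entry index field */
#define PT_TOPA_PAGE_SHIFT      7
#define PAGE_SIZE               4096

static uint64_t
decode_trace_offset(uint64_t reg)
{
        uint64_t off;

        /* Entry index times page size, valid when each ToPA entry maps one 4 KiB page. */
        off = ((reg & PT_TOPA_PAGE_MASK) >> PT_TOPA_PAGE_SHIFT) * PAGE_SIZE;
        return (off + (reg >> 32));     /* plus the offset within that page */
}

int
main(void)
{
        /* Third ToPA entry (index 3), 0x128 bytes written into its page. */
        uint64_t reg = ((uint64_t)0x128 << 32) | (3 << PT_TOPA_PAGE_SHIFT);

        printf("offset = 0x%jx\n", (uintmax_t)decode_trace_offset(reg));        /* 0x3128 */
        return (0);
}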
206 vm_offset_t offset; in pt_fill_buffer_record() local
208 offset = atomic_load_acq_64(&buf->offset); in pt_fill_buffer_record()
210 rec->record_type = HWT_RECORD_BUFFER; in pt_fill_buffer_record()
211 rec->buf_id = id; in pt_fill_buffer_record()
212 rec->curpage = offset / PAGE_SIZE; in pt_fill_buffer_record()
213 rec->offset = offset & PAGE_MASK; in pt_fill_buffer_record()
257 struct pt_cpu *cpu; in pt_cpu_start() local
259 cpu = &pt_pcpu[curcpu]; in pt_cpu_start()
260 MPASS(cpu->ctx != NULL); in pt_cpu_start()
266 pt_cpu_toggle_local(cpu->ctx->save_area, true); in pt_cpu_start()
271 * Updates trace buffer offset to ensure
278 struct pt_cpu *cpu; in pt_cpu_stop() local
281 cpu = &pt_pcpu[curcpu]; in pt_cpu_stop()
282 ctx = cpu->ctx; in pt_cpu_stop()
287 dprintf("%s: missing context on cpu %d; bailing\n", __func__, in pt_cpu_stop()
291 pt_cpu_toggle_local(cpu->ctx->save_area, false); in pt_cpu_stop()
292 pt_update_buffer(&ctx->buf); in pt_cpu_stop()
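pt_cpu_start() and pt_cpu_stop() both funnel into pt_cpu_toggle_local(), whose body is not part of this listing. The comment below records what it is assumed to do, based on the XSAVE/XRSTOR design note at the top of the file and on the documented behaviour of XSAVES/XRSTORS for the PT state component; treat it as a sketch, not the driver's actual implementation.

/*
 * Assumed behaviour of pt_cpu_toggle_local(save_area, enable):
 *
 *   enable  - XRSTORS the PT state component from save_area: the staged
 *             IA32_RTIT_* values (with TraceEn set) land in the MSRs and
 *             tracing starts.
 *   disable - XSAVES the PT state component into save_area: the live
 *             IA32_RTIT_* values are written back and TraceEn is cleared,
 *             so tracing stops and the saved rtit_output_mask_ptrs holds the
 *             final write position, which pt_update_buffer() decodes above.
 */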
310 buf = &ctx->buf; in pt_topa_prepare()
312 KASSERT(buf->topa_hw == NULL, in pt_topa_prepare()
314 buf->topa_hw = mallocarray(vm->npages + 1, sizeof(uint64_t), M_PT, in pt_topa_prepare()
316 dprintf("%s: ToPA virt addr %p\n", __func__, buf->topa_hw); in pt_topa_prepare()
317 buf->size = vm->npages * PAGE_SIZE; in pt_topa_prepare()
318 for (i = 0; i < vm->npages; i++) { in pt_topa_prepare()
319 buf->topa_hw[i] = VM_PAGE_TO_PHYS(vm->pages[i]) | topa_size; in pt_topa_prepare()
326 buf->topa_hw[i] |= TOPA_INT; in pt_topa_prepare()
328 buf->topa_hw[vm->npages] = (uint64_t)vtophys(buf->topa_hw) | TOPA_END; in pt_topa_prepare()
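To make the loop above concrete, this is roughly the table pt_topa_prepare() ends up with for a four-page trace buffer. The encodings are illustrative only: the real TOPA_* bit values and the exact condition under which TOPA_INT is set are not visible in this listing.

/*
 * Hypothetical ToPA table for a 4-page (16 KiB) trace buffer:
 *
 *   topa_hw[0] = VM_PAGE_TO_PHYS(pages[0]) | topa_size [| TOPA_INT]
 *   topa_hw[1] = VM_PAGE_TO_PHYS(pages[1]) | topa_size [| TOPA_INT]
 *   topa_hw[2] = VM_PAGE_TO_PHYS(pages[2]) | topa_size [| TOPA_INT]
 *   topa_hw[3] = VM_PAGE_TO_PHYS(pages[3]) | topa_size [| TOPA_INT]
 *   topa_hw[4] = vtophys(topa_hw) | TOPA_END
 *
 * TOPA_INT marks the entries that raise a ToPA PMI once filled; which entries
 * get it depends on a condition that is not matched in this listing.  The END
 * entry points back at the table itself, so output wraps to entry 0 and the
 * trace buffer behaves as a ring.
 */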
351 n = cfg->nranges; in pt_configure_ranges()
353 printf("%s: %d IP filtering ranges requested, CPU " in pt_configure_ranges()
361 pt_ext->rtit_ctl |= (1UL << RTIT_CTL_ADDR_CFG_S(1)); in pt_configure_ranges()
362 pt_ext->rtit_addr1_a = cfg->ip_ranges[1].start; in pt_configure_ranges()
363 pt_ext->rtit_addr1_b = cfg->ip_ranges[1].end; in pt_configure_ranges()
365 pt_ext->rtit_ctl |= (1UL << RTIT_CTL_ADDR_CFG_S(0)); in pt_configure_ranges()
366 pt_ext->rtit_addr0_a = cfg->ip_ranges[0].start; in pt_configure_ranges()
367 pt_ext->rtit_addr0_b = cfg->ip_ranges[0].end; in pt_configure_ranges()
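The shifts above land in IA32_RTIT_CTL's per-range configuration fields. The note below assumes RTIT_CTL_ADDR_CFG_S(n) expands to 32 + 4 * n, the bit position of the 4-bit ADDRn_CFG field described in the Intel SDM; the macro's actual definition is not part of this listing.

/*
 * Assumed layout of the address-filter controls in IA32_RTIT_CTL:
 *
 *   ADDR0_CFG = bits 35:32   paired with rtit_addr0_a / rtit_addr0_b
 *   ADDR1_CFG = bits 39:36   paired with rtit_addr1_a / rtit_addr1_b
 *
 * Writing 1 into ADDRn_CFG (the "1UL << RTIT_CTL_ADDR_CFG_S(n)" above) asks
 * the CPU to emit trace packets only while the instruction pointer lies in
 * [rtit_addrN_a, rtit_addrN_b]; a value of 2 would instead stop tracing on
 * entry to that range (TraceStop).
 */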
385 KASSERT(pt_ctx->buf.topa_hw == NULL, in pt_init_ctx()
389 mtx_init(&pt_ctx->buf.lock, "pttopa", NULL, MTX_SPIN); in pt_init_ctx()
390 pt_ctx->save_area = malloc_aligned(pt_info.xsave_area_size, 64, in pt_init_ctx()
392 if (pt_ctx->save_area == NULL) in pt_init_ctx()
396 free(pt_ctx->save_area, M_PT); in pt_init_ctx()
400 pt_ctx->id = ctx_id; in pt_init_ctx()
409 if (pt_ctx->buf.topa_hw != NULL) in pt_deinit_ctx()
410 free(pt_ctx->buf.topa_hw, M_PT); in pt_deinit_ctx()
411 if (pt_ctx->save_area != NULL) in pt_deinit_ctx()
412 free(pt_ctx->save_area, M_PT); in pt_deinit_ctx()
419 * Checks and translates the user-defined configuration to a
421 * the tracing context for the target CPU or thread.
436 cfg = (struct pt_cpu_config *)ctx->config; in pt_backend_configure()
440 cfg->rtit_ctl &= PT_SUPPORTED_FLAGS; in pt_backend_configure()
441 if (cfg->rtit_ctl & RTIT_CTL_MTCEN) { in pt_backend_configure()
443 printf("%s: CPU does not support generating MTC " in pt_backend_configure()
449 if (cfg->rtit_ctl & RTIT_CTL_CR3FILTER) { in pt_backend_configure()
451 printf("%s: CPU does not support CR3 filtering\n", in pt_backend_configure()
457 if (cfg->rtit_ctl & RTIT_CTL_DIS_TNT) { in pt_backend_configure()
459 printf("%s: CPU does not support TNT\n", __func__); in pt_backend_configure()
465 if (ctx->mode == HWT_MODE_CPU) { in pt_backend_configure()
466 TAILQ_FOREACH(hwt_cpu, &ctx->cpus, next) { in pt_backend_configure()
467 if (hwt_cpu->cpu_id != cpu_id) in pt_backend_configure()
473 TAILQ_FOREACH(thr, &ctx->threads, next) { in pt_backend_configure()
474 if (thr->thread_id != thread_id) in pt_backend_configure()
476 KASSERT(thr->private != NULL, in pt_backend_configure()
480 pt_ctx = (struct pt_ctx *)thr->private; in pt_backend_configure()
491 pt_ext->rtit_ctl |= cfg->rtit_ctl; in pt_backend_configure()
492 if (cfg->nranges != 0) { in pt_backend_configure()
497 pt_ctx->hwt_ctx = ctx; in pt_backend_configure()
498 pt_ext->rtit_ctl |= RTIT_CTL_TOPA; in pt_backend_configure()
499 pt_ext->rtit_output_base = (uint64_t)vtophys(pt_ctx->buf.topa_hw); in pt_backend_configure()
500 pt_ext->rtit_output_mask_ptrs = PT_TOPA_MASK_PTRS; in pt_backend_configure()
501 hdr->xstate_bv = XFEATURE_ENABLED_PT; in pt_backend_configure()
502 hdr->xstate_xcomp_bv = XFEATURE_ENABLED_PT | in pt_backend_configure()
504 pt_ext->rtit_ctl |= RTIT_CTL_TRACEEN; in pt_backend_configure()
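The tail of pt_backend_configure() never touches the MSRs directly; everything is staged in the XSAVE area. The note below explains the two header fields set above; the claim about the compaction bit is an assumption, since the match for xstate_xcomp_bv is truncated.

/*
 * xstate_bv's PT bit marks the PT state component as present in save_area, so
 * a later XRSTORS loads the staged rtit_* values instead of initializing the
 * MSRs.  xstate_xcomp_bv must describe the compacted format that
 * XSAVES/XRSTORS require, so the truncated expression above presumably ORs in
 * the compaction bit (bit 63) alongside XFEATURE_ENABLED_PT.  Tracing only
 * begins once pt_cpu_start() restores this image with RTIT_CTL_TRACEEN
 * already set in rtit_ctl.
 */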
511 * hwt backend trace start operation. CPU affine.
516 if (ctx->mode == HWT_MODE_CPU) in pt_backend_enable()
520 ("%s: attempting to start PT on another cpu", __func__)); in pt_backend_enable()
522 CPU_SET(cpu_id, &ctx->cpu_map); in pt_backend_enable()
526 * hwt backend trace stop operation. CPU affine.
531 struct pt_cpu *cpu; in pt_backend_disable() local
533 if (ctx->mode == HWT_MODE_CPU) in pt_backend_disable()
536 ("%s: attempting to disable PT on another cpu", __func__)); in pt_backend_disable()
538 cpu = &pt_pcpu[cpu_id]; in pt_backend_disable()
540 dprintf("%s: waiting for cpu %d to exit interrupt handler\n", __func__, in pt_backend_disable()
543 while (atomic_cmpset_int(&cpu->in_pcint_handler, 1, 0)) in pt_backend_disable()
547 CPU_CLR(cpu_id, &ctx->cpu_map); in pt_backend_disable()
548 cpu->ctx = NULL; in pt_backend_disable()
559 KASSERT(ctx->mode == HWT_MODE_CPU, in pt_backend_enable_smp()
560 ("%s: should only be used for CPU mode", __func__)); in pt_backend_enable_smp()
561 if (ctx->mode == HWT_MODE_CPU && in pt_backend_enable_smp()
563 return (-1); in pt_backend_enable_smp()
565 smp_rendezvous_cpus(ctx->cpu_map, NULL, pt_cpu_start, NULL, NULL); in pt_backend_enable_smp()
576 struct pt_cpu *cpu; in pt_backend_disable_smp() local
579 if (ctx->mode == HWT_MODE_CPU && in pt_backend_disable_smp()
581 return (-1); in pt_backend_disable_smp()
583 if (CPU_EMPTY(&ctx->cpu_map)) { in pt_backend_disable_smp()
584 dprintf("%s: empty cpu map\n", __func__); in pt_backend_disable_smp()
585 return (-1); in pt_backend_disable_smp()
587 CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) { in pt_backend_disable_smp()
588 cpu = &pt_pcpu[cpu_id]; in pt_backend_disable_smp()
589 dprintf("%s: waiting for cpu %d to exit interrupt handler\n", in pt_backend_disable_smp()
592 while (atomic_cmpset_int(&cpu->in_pcint_handler, 1, 0)) in pt_backend_disable_smp()
595 smp_rendezvous_cpus(ctx->cpu_map, NULL, pt_cpu_stop, NULL, NULL); in pt_backend_disable_smp()
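Both SMP paths lean on the kernel's rendezvous primitive; the note below ties it back to the per-CPU helpers above. The signature shown is the stock FreeBSD smp_rendezvous_cpus(); the rest is a summary of the matches, not additional driver behaviour.

/*
 * smp_rendezvous_cpus(cpu_map, setup, action, teardown, arg) runs "action" on
 * every CPU in cpu_map.  With pt_cpu_start/pt_cpu_stop as the action, each
 * traced CPU toggles its own PT state locally, which is why those helpers can
 * rely on curcpu and on XSAVES/XRSTORS of the local save area without extra
 * cross-CPU locking.  The spin on in_pcint_handler just above makes sure no
 * ToPA PMI handler is still running on any traced CPU before pt_cpu_stop()
 * publishes the final buffer offset.
 */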
613 if (ctx->mode != HWT_MODE_CPU) in pt_backend_init()
615 TAILQ_FOREACH(hwt_cpu, &ctx->cpus, next) { in pt_backend_init()
616 error = pt_init_ctx(&pt_pcpu_ctx[hwt_cpu->cpu_id], hwt_cpu->vm, in pt_backend_init()
617 hwt_cpu->cpu_id); in pt_backend_init()
641 if (ctx->mode == HWT_MODE_THREAD) { in pt_backend_deinit()
642 TAILQ_FOREACH(thr, &ctx->threads, next) { in pt_backend_deinit()
643 KASSERT(thr->private != NULL, in pt_backend_deinit()
644 ("%s: thr->private not set", __func__)); in pt_backend_deinit()
645 pt_ctx = (struct pt_ctx *)thr->private; in pt_backend_deinit()
649 CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) { in pt_backend_deinit()
653 ("%s: CPU mode tracing with non-cpu mode PT" in pt_backend_deinit()
666 * Fetches current offset into the tracing buffer.
673 uint64_t offset; in pt_backend_read() local
675 if (vm->ctx->mode == HWT_MODE_THREAD) in pt_backend_read()
676 buf = &((struct pt_ctx *)vm->thr->private)->buf; in pt_backend_read()
678 buf = &pt_pcpu[vm->cpu->cpu_id].ctx->buf; in pt_backend_read()
679 offset = atomic_load_acq_64(&buf->offset); in pt_backend_read()
680 *curpage = offset / PAGE_SIZE; in pt_backend_read()
681 *curpage_offset = offset & PAGE_MASK; in pt_backend_read()
696 /* Omit M_WAITOK since this might get invoked in a non-sleepable context */ in pt_backend_alloc_thread()
701 error = pt_init_ctx(pt_ctx, thr->vm, thr->thread_id); in pt_backend_alloc_thread()
705 thr->private = pt_ctx; in pt_backend_alloc_thread()
716 ctx = (struct pt_ctx *)thr->private; in pt_backend_free_thread()
755 * Reads the latest valid trace buffer offset and enqueues
762 struct pt_cpu *cpu = (struct pt_cpu *)arg; in pt_send_buffer_record() local
765 struct pt_ctx *ctx = cpu->ctx; in pt_send_buffer_record()
766 pt_fill_buffer_record(ctx->id, &ctx->buf, &record); in pt_send_buffer_record()
767 hwt_record_ctx(ctx->hwt_ctx, &record, M_ZERO | M_NOWAIT); in pt_send_buffer_record()
785 * Re-enables the PC interrupt line as long as tracing is active.
791 struct pt_cpu *cpu; in pt_topa_intr() local
795 cpu = &pt_pcpu[curcpu]; in pt_topa_intr()
805 atomic_set_int(&cpu->in_pcint_handler, 1); in pt_topa_intr()
807 ctx = cpu->ctx; in pt_topa_intr()
809 ("%s: cpu %d: ToPA PMI interrupt without an active context", in pt_topa_intr()
811 buf = &ctx->buf; in pt_topa_intr()
812 KASSERT(buf->topa_hw != NULL, in pt_topa_intr()
813 ("%s: cpu %d: ToPA PMI interrupt with invalid buffer", __func__, in pt_topa_intr()
815 pt_cpu_toggle_local(ctx->save_area, false); in pt_topa_intr()
820 swi_sched(cpu->swi_cookie, SWI_FROMNMI); in pt_topa_intr()
821 pt_cpu_toggle_local(ctx->save_area, true); in pt_topa_intr()
824 atomic_set_int(&cpu->in_pcint_handler, 0); in pt_topa_intr()
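Pieced together from the matches above, the ToPA PMI path looks like the outline below. Step 3 is an inference: the call that refreshes the offset is not matched here, and the reason for deferring to a software interrupt is deduced from the SWI_FROMNMI/M_NOWAIT usage rather than stated in the listing.

/*
 * Outline of pt_topa_intr():
 *
 *   1. mark the handler active      atomic_set_int(&cpu->in_pcint_handler, 1)
 *   2. pause tracing                pt_cpu_toggle_local(save_area, false)
 *   3. publish the write offset     presumably pt_update_buffer(&ctx->buf)
 *   4. defer the record             swi_sched(cpu->swi_cookie, SWI_FROMNMI);
 *                                   pt_send_buffer_record() later fills the
 *                                   HWT_RECORD_BUFFER record and calls
 *                                   hwt_record_ctx() with M_NOWAIT, work that
 *                                   is not safe in PMI/NMI context
 *   5. resume tracing               pt_cpu_toggle_local(save_area, true)
 *   6. mark the handler done        atomic_set_int(&cpu->in_pcint_handler, 0)
 *
 * The in_pcint_handler flag is what pt_backend_disable*() polls before
 * clearing cpu->ctx, so the handler never sees a context that is being torn
 * down under it.
 */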
831 * Saves all PT-related cpuid info, registers itself as a HWT backend,
833 * on each CPU.
843 dprintf("pt: Maximum valid sub-leaf Index: %x\n", cp[0]); in pt_init()
875 "%s: failed to add interrupt handler for cpu: %d\n", in pt_init()
904 * Checks whether the CPU supports Intel PT and
917 printf("pt: CPU does not support Intel Processor Trace\n"); in pt_supported()
925 printf("pt: CPU does not support managing PT state using XSAVE\n"); in pt_supported()
933 printf("pt: CPU does not support XSAVES/XRSTORS\n"); in pt_supported()
960 struct pt_cpu *cpu; in pt_deinit() local
969 cpu = &pt_pcpu[i]; in pt_deinit()
970 swi_remove(cpu->swi_cookie); in pt_deinit()