Lines Matching defs:trace

228 struct perf_callchain_entry *trace, u64 flags)
238 if (trace->nr <= skip)
239 /* skipping more than usable stack trace */
242 trace_nr = trace->nr - skip;
244 ips = trace->ip + skip;
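
These first matches are inside __bpf_get_stackid(), which consumes an already-collected callchain. The skip count comes from the low bits of flags (BPF_F_SKIP_FIELD_MASK), and the function bails out when skipping would consume the whole trace. A minimal sketch of that pattern; the declarations around the matched lines are assumptions from context:

	/* sketch only: the declarations here are assumed, not matched */
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 trace_nr;
	u64 *ips;

	if (trace->nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr = trace->nr - skip;
	ips = trace->ip + skip;
	/* the trace_nr entries at ips are then hashed into a stack map bucket */
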
306 struct perf_callchain_entry *trace;
317 trace = get_perf_callchain(regs, kernel, user, max_depth,
320 if (unlikely(!trace))
321 /* couldn't fetch the stack trace */
324 return __bpf_get_stackid(map, trace, flags);
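
The matches at 306-324 are from the bpf_get_stackid() helper itself: it asks perf for a fresh callchain and hands it to __bpf_get_stackid(). A hedged reconstruction; the user/kernel derivation and the trailing get_perf_callchain() arguments are assumptions from context:

	struct perf_callchain_entry *trace;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;

	trace = get_perf_callchain(regs, kernel, user, max_depth,
				   false, false);
	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	return __bpf_get_stackid(map, trace, flags);
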
336 static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
340 while (nr_kernel < trace->nr) {
341 if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
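
count_kernel_ip() walks the callchain until it hits the PERF_CONTEXT_USER marker that perf places between kernel and user frames, so the entries before the marker are exactly the kernel IPs. The matched lines suggest the whole function reads roughly like this; the loop body past the matched lines is an assumption:

	static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
	{
		__u64 nr_kernel = 0;

		while (nr_kernel < trace->nr) {
			if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
				break;
			nr_kernel++;
		}
		return nr_kernel;
	}
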
352 struct perf_callchain_entry *trace;
369 trace = ctx->data->callchain;
370 if (unlikely(!trace))
373 nr_kernel = count_kernel_ip(trace);
376 __u64 nr = trace->nr;
378 trace->nr = nr_kernel;
379 ret = __bpf_get_stackid(map, trace, flags);
382 trace->nr = nr;
391 ret = __bpf_get_stackid(map, trace, flags);
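
Lines 352-391 sit in the perf-event variant of the helper, which reuses the callchain already captured in the sample (ctx->data->callchain) instead of unwinding again. Because that entry is shared sample data, the kernel-only path temporarily clamps trace->nr to the kernel portion around the __bpf_get_stackid() call and restores it afterwards; the user path instead folds nr_kernel into the skip count. A sketch consistent with the matched lines; the branch condition and the skip arithmetic are assumptions:

	trace = ctx->data->callchain;
	if (unlikely(!trace))
		return -EFAULT;

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;		/* hide the user frames */
		ret = __bpf_get_stackid(map, trace, flags);
		trace->nr = nr;			/* restore the shared entry */
	} else {
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;		/* skip past the kernel frames */
		if (skip > BPF_F_SKIP_FIELD_MASK)
			return -EFAULT;
		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		ret = __bpf_get_stackid(map, trace, flags);
	}
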
414 struct perf_callchain_entry *trace;
450 trace = trace_in;
452 trace = get_callchain_entry_for_task(task, max_depth);
454 trace = get_perf_callchain(regs, kernel, user, max_depth,
457 if (unlikely(!trace) || trace->nr < skip) {
463 trace_nr = trace->nr - skip;
467 ips = trace->ip + skip;
478 /* trace/ips should not be dereferenced after this point */
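
The matches at 414-478 are from __bpf_get_stack(), which copies frames into a caller-supplied buffer rather than a map. The callchain can come from three places: a trace passed in by the caller, a task's saved callchain, or a fresh unwind via get_perf_callchain(). Once the frames have been copied out (and optionally translated to build-id plus offset), the entry may be handed back to the callchain machinery, hence the "should not be dereferenced after this point" comment. A sketch of the selection logic; the branch conditions and the error label are assumptions:

	if (trace_in)
		trace = trace_in;
	else if (kernel && task)
		trace = get_callchain_entry_for_task(task, max_depth);
	else
		trace = get_perf_callchain(regs, kernel, user, max_depth,
					   crosstask, false);

	if (unlikely(!trace) || trace->nr < skip)
		goto err_fault;

	trace_nr = trace->nr - skip;
	ips = trace->ip + skip;
	/* copy the frames out to the buffer here;
	 * trace/ips should not be dereferenced after this point
	 */
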
584 struct perf_callchain_entry *trace;
600 trace = ctx->data->callchain;
601 if (unlikely(!trace))
604 nr_kernel = count_kernel_ip(trace);
607 __u64 nr = trace->nr;
609 trace->nr = nr_kernel;
610 err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);
613 trace->nr = nr;
622 err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);
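
The final group (584-622) applies the same save/clamp/restore pattern to the buffer-filling path: for a kernel-only request, trace->nr is clamped to nr_kernel around the __bpf_get_stack() call, and the false /* !may_fault */ argument marks this as the non-faulting entry point (a may_fault variant presumably serves a sleepable helper). A compressed sketch; the user branch is an assumption, mirroring the stackid variant above:

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;
		err = __bpf_get_stack(regs, NULL, trace, buf, size,
				      flags, false /* !may_fault */);
		trace->nr = nr;			/* restore */
	} else {
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;
		if (skip > BPF_F_SKIP_FIELD_MASK)
			return -EFAULT;
		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		err = __bpf_get_stack(regs, NULL, trace, buf, size,
				      flags, false /* !may_fault */);
	}
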