/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * It can also be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into the ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different),
		 * and don't send the kprobe event into the ring buffer,
		 * so return zero here.
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
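
/*
 * For illustration only (hypothetical caller, not part of this file):
 * a kprobe dispatcher would gate its ring-buffer write on the verdict
 * from trace_call_bpf(), roughly like
 *
 *	if (prog && !trace_call_bpf(prog, regs))
 *		return;				// 0: event filtered out
 *	// ... store the kprobe event ...	// 1 (or any reserved value)
 */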

static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
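
/*
 * For illustration only (sample format strings checked against the rules
 * implemented above; the call sites are hypothetical):
 *
 *	bpf_trace_printk("pid %d comm %s\n", ...)	accepted
 *	bpf_trace_printk("%s vs %s\n", ...)		rejected: second %s
 *	bpf_trace_printk("%f\n", ...)			rejected: bad specifier
 *	bpf_trace_printk("%d %d %d %d\n", ...)		rejected: 4th conversion
 */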

static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (unlikely(!ee))
		return -ENOENT;

	event = ee->event;
	/* make sure event is local and doesn't have pmu::count */
	if (event->oncpu != smp_processor_id() ||
	    event->pmu->count)
		return -EINVAL;

	/*
	 * The return value alone does not tell whether the read
	 * succeeded: error codes alias with large counter values,
	 * so success has to be judged elsewhere, e.g. in the eBPF
	 * program itself.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *) (long) r1;
	struct bpf_map *map = (struct bpf_map *) (long) r2;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 index = flags & BPF_F_INDEX_MASK;
	void *data = (void *) (long) r4;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct perf_raw_record raw = {
		.size = size,
		.data = data,
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = raw_smp_processor_id();
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (unlikely(!ee))
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != smp_processor_id()))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = &raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);

	perf_fetch_caller_regs(regs);

	return bpf_perf_event_output((long)regs, r2, flags, r4, size);
}

static const struct bpf_func_proto bpf_event_output_proto = {
	.func		= bpf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_event_output_proto(void)
{
	return &bpf_event_output_proto;
}

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;

	/* only read is allowed */
	if (type != BPF_READ)
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	return true;
}

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};
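
/*
 * For illustration only (hypothetical bpf-side source, built separately
 * with clang -target bpf; the SEC() macro and register name are
 * assumptions from typical userspace tooling, not part of this file):
 * a kprobe program's context is the raw 'struct pt_regs', so argument
 * fetches are plain field loads, which is exactly what
 * kprobe_prog_is_valid_access() permits. On x86_64 the first function
 * argument could be read as:
 *
 *	SEC("kprobe/sys_write")
 *	int bpf_prog(struct pt_regs *ctx)
 *	{
 *		long fd = ctx->di;	// first argument register (x86_64)
 *		return 0;		// 0: filter the event out
 *	}
 */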

static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to the perf tracepoint buffer, where the first 8 bytes
	 * are hidden from the bpf program and contain a pointer to
	 * 'struct pt_regs'. Fetch it from there and call the same
	 * bpf_perf_event_output() helper.
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);
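
/*
 * For illustration only (layout sketch inferred from the code above, not
 * a definition from this file): the buffer handed to a
 * BPF_PROG_TYPE_TRACEPOINT program looks like
 *
 *	+---------------------------+  <- off 0: hidden 'struct pt_regs *'
 *	| struct pt_regs *regs      |     (why tp_prog_is_valid_access()
 *	+---------------------------+      rejects off < sizeof(void *))
 *	| tracepoint fields ...     |  <- off 8: what the program may read,
 *	+---------------------------+      up to PERF_MAX_TRACE_SIZE
 */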