--- core.c  (7d9a6ef558f6ff375aab9e29f08124cb0daa9bc5)
+++ core.c  (ab3f0063c48c26c927851b6767824e35a716d878)
 /*
  * Linux Socket Filter - Kernel level socket filtering
  *
  * Based on the design of the Berkeley Packet Filter. The new
  * internal format has been designed by PLUMgrid:
  *
  * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
  *
[... 295 unchanged lines hidden ...]
         WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

         *symbol_start = addr;
         *symbol_end = addr + hdr->pages * PAGE_SIZE;
 }

 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 {
+        const char *end = sym + KSYM_NAME_LEN;
+
         BUILD_BUG_ON(sizeof("bpf_prog_") +
-                     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);
+                     sizeof(prog->tag) * 2 +
+                     /* name has been null terminated.
+                      * We should need +1 for the '_' preceding
+                      * the name.  However, the null character
+                      * is double counted between the name and the
+                      * sizeof("bpf_prog_") above, so we omit
+                      * the +1 here.
+                      */
+                     sizeof(prog->aux->name) > KSYM_NAME_LEN);

         sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
         sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
-        *sym = 0;
+        if (prog->aux->name[0])
+                snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
+        else
+                *sym = 0;
 }
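With this hunk a JITed program's kallsyms entry becomes bpf_prog_<tag>_<name> rather than bpf_prog_<tag> alone. The sizing argument packed into the reworked BUILD_BUG_ON is easy to check by hand; the standalone sketch below redoes the arithmetic, where the constant values (BPF_TAG_SIZE, BPF_OBJ_NAME_LEN, KSYM_NAME_LEN) are assumptions for illustration and are not shown in this hunk:

/* Standalone check of the BUILD_BUG_ON arithmetic above.  The constant
 * values are assumptions; only the relation between them comes from
 * the diff.
 */
#include <assert.h>
#include <stdio.h>

#define BPF_TAG_SIZE     8    /* assumed sizeof(prog->tag) */
#define BPF_OBJ_NAME_LEN 16   /* assumed sizeof(prog->aux->name), NUL included */
#define KSYM_NAME_LEN    128  /* assumed kallsyms symbol limit */

int main(void)
{
        /* Worst case: "bpf_prog_" (9) + 16 tag hex chars + '_' (1) +
         * 15 name chars + NUL (1) = 42 bytes.  sizeof("bpf_prog_")
         * counts one NUL and sizeof(name) another, but the final string
         * holds only one; the spare byte pays for the '_', hence no
         * explicit +1 in the BUILD_BUG_ON.
         */
        size_t worst = sizeof("bpf_prog_") + BPF_TAG_SIZE * 2 + BPF_OBJ_NAME_LEN;

        assert(worst <= KSYM_NAME_LEN);
        printf("worst-case symbol size: %zu bytes\n", worst);
        return 0;
}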

 static __always_inline unsigned long
 bpf_get_prog_addr_start(struct latch_tree_node *n)
 {
         unsigned long symbol_start, symbol_end;
         const struct bpf_prog_aux *aux;

[... 1036 unchanged lines hidden ...]
         fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];

         /* eBPF JITs can rewrite the program in case constant
          * blinding is active. However, in case of error during
          * blinding, bpf_int_jit_compile() must always return a
          * valid program, which in this case would simply not
          * be JITed, but falls back to the interpreter.
          */
-        fp = bpf_int_jit_compile(fp);
+        if (!bpf_prog_is_dev_bound(fp->aux)) {
+                fp = bpf_int_jit_compile(fp);
+        } else {
+                *err = bpf_prog_offload_compile(fp);
+                if (*err)
+                        return fp;
+        }
         bpf_prog_lock_ro(fp);

         /* The tail call compatibility check can only be done at
          * this late stage as we need to determine, if we deal
          * with JITed or non JITed program concatenations and not
          * all eBPF JITs might immediately support all features.
          */
         *err = bpf_check_tail_call(fp);

         return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

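The hunk above keeps device-bound programs away from the host JIT: bpf_prog_offload_compile() handles them instead, and on failure the program is still returned while the error travels through *err. A hypothetical caller sketch showing how that contract is consumed; the wrapper name and cleanup are illustrative, not from this file:

/* Hypothetical caller: bpf_prog_select_runtime() always hands back a
 * program, so failures must be read out of *err before fp is used.
 */
static struct bpf_prog *finalize_prog(struct bpf_prog *fp)
{
        int err = 0;

        fp = bpf_prog_select_runtime(fp, &err); /* JIT, offload, or interpreter */
        if (err) {
                /* offload compile or tail call check failed */
                bpf_prog_free(fp);
                return ERR_PTR(err);
        }
        return fp;
}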
+static unsigned int __bpf_prog_ret1(const void *ctx,
+                                    const struct bpf_insn *insn)
+{
+        return 1;
+}
+
+static struct bpf_prog_dummy {
+        struct bpf_prog prog;
+} dummy_bpf_prog = {
+        .prog = {
+                .bpf_func = __bpf_prog_ret1,
+        },
+};
+
+/* to avoid allocating empty bpf_prog_array for cgroups that
+ * don't have bpf program attached use one global 'empty_prog_array'
+ * It will not be modified the caller of bpf_prog_array_alloc()
+ * (since caller requested prog_cnt == 0)
+ * that pointer should be 'freed' by bpf_prog_array_free()
+ */
+static struct {
+        struct bpf_prog_array hdr;
+        struct bpf_prog *null_prog;
+} empty_prog_array = {
+        .null_prog = NULL,
+};
+
+struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
+{
+        if (prog_cnt)
+                return kzalloc(sizeof(struct bpf_prog_array) +
+                               sizeof(struct bpf_prog *) * (prog_cnt + 1),
+                               flags);
+
+        return &empty_prog_array.hdr;
+}
+
+void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
+{
+        if (!progs ||
+            progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
+                return;
+        kfree_rcu(progs, rcu);
+}
+
+int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
+{
+        struct bpf_prog **prog;
+        u32 cnt = 0;
+
+        rcu_read_lock();
+        prog = rcu_dereference(progs)->progs;
+        for (; *prog; prog++)
+                cnt++;
+        rcu_read_unlock();
+        return cnt;
+}
+
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+                                __u32 __user *prog_ids, u32 cnt)
+{
+        struct bpf_prog **prog;
+        u32 i = 0, id;
+
+        rcu_read_lock();
+        prog = rcu_dereference(progs)->progs;
+        for (; *prog; prog++) {
+                id = (*prog)->aux->id;
+                if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
+                        rcu_read_unlock();
+                        return -EFAULT;
+                }
+                if (++i == cnt) {
+                        prog++;
+                        break;
+                }
+        }
+        rcu_read_unlock();
+        if (*prog)
+                return -ENOSPC;
+        return 0;
+}
+
+void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+                                struct bpf_prog *old_prog)
+{
+        struct bpf_prog **prog = progs->progs;
+
+        for (; *prog; prog++)
+                if (*prog == old_prog) {
+                        WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
+                        break;
+                }
+}
+
+int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+                        struct bpf_prog *exclude_prog,
+                        struct bpf_prog *include_prog,
+                        struct bpf_prog_array **new_array)
+{
+        int new_prog_cnt, carry_prog_cnt = 0;
+        struct bpf_prog **existing_prog;
+        struct bpf_prog_array *array;
+        int new_prog_idx = 0;
+
+        /* Figure out how many existing progs we need to carry over to
+         * the new array.
+         */
+        if (old_array) {
+                existing_prog = old_array->progs;
+                for (; *existing_prog; existing_prog++) {
+                        if (*existing_prog != exclude_prog &&
+                            *existing_prog != &dummy_bpf_prog.prog)
+                                carry_prog_cnt++;
+                        if (*existing_prog == include_prog)
+                                return -EEXIST;
+                }
+        }
+
+        /* How many progs (not NULL) will be in the new array? */
+        new_prog_cnt = carry_prog_cnt;
+        if (include_prog)
+                new_prog_cnt += 1;
+
+        /* Do we have any prog (not NULL) in the new array? */
+        if (!new_prog_cnt) {
+                *new_array = NULL;
+                return 0;
+        }
+
+        /* +1 as the end of prog_array is marked with NULL */
+        array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
+        if (!array)
+                return -ENOMEM;
+
+        /* Fill in the new prog array */
+        if (carry_prog_cnt) {
+                existing_prog = old_array->progs;
+                for (; *existing_prog; existing_prog++)
+                        if (*existing_prog != exclude_prog &&
+                            *existing_prog != &dummy_bpf_prog.prog)
+                                array->progs[new_prog_idx++] = *existing_prog;
+        }
+        if (include_prog)
+                array->progs[new_prog_idx++] = include_prog;
+        array->progs[new_prog_idx] = NULL;
+        *new_array = array;
+        return 0;
+}
+
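Three conventions make this new block work: a prog_array is a NULL-terminated vector of bpf_prog pointers, every empty attach point shares the static empty_prog_array instead of allocating, and bpf_prog_array_delete_safe() overwrites the slot with dummy_bpf_prog rather than compacting, so lockless RCU readers never see entries shift underneath them. A runnable userspace model of the delete-safe trick, with illustrative names:

/* Userspace model of the prog_array deletion convention above; the
 * names are illustrative, not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

static int dummy_prog;                  /* stands in for dummy_bpf_prog */

static int **array_alloc(int cnt)
{
        /* +1 for the NULL terminator, as in bpf_prog_array_alloc() */
        return calloc(cnt + 1, sizeof(int *));
}

static void array_delete_safe(int **progs, int *old)
{
        for (; *progs; progs++)
                if (*progs == old) {
                        *progs = &dummy_prog;   /* WRITE_ONCE() in the kernel */
                        break;
                }
}

int main(void)
{
        int a = 1, b = 2;
        int **arr = array_alloc(2);

        arr[0] = &a;
        arr[1] = &b;
        array_delete_safe(arr, &a);
        for (int **p = arr; *p; p++)    /* still two entries, no hole */
                printf("%s\n", *p == &dummy_prog ? "dummy" : "prog");
        free(arr);
        return 0;
}

The dummies are only dropped when bpf_prog_array_copy() builds the next array, which is where the list actually shrinks.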
 static void bpf_prog_free_deferred(struct work_struct *work)
 {
         struct bpf_prog_aux *aux;

         aux = container_of(work, struct bpf_prog_aux, work);
+        if (bpf_prog_is_dev_bound(aux))
+                bpf_prog_offload_destroy(aux->prog);
         bpf_jit_free(aux->prog);
 }

 /* Free internal BPF program */
 void bpf_prog_free(struct bpf_prog *fp)
 {
         struct bpf_prog_aux *aux = fp->aux;

[... 96 unchanged lines hidden ...]
 }

 /* All definitions of tracepoints related to BPF. */
 #define CREATE_TRACE_POINTS
 #include <linux/bpf_trace.h>

 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);

+/* These are only used within the BPF_SYSCALL code */
+#ifdef CONFIG_BPF_SYSCALL
 EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
 EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
+#endif
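Read together, the offload hunks pair setup with teardown across a program's lifetime: bpf_prog_select_runtime() routes dev-bound programs to bpf_prog_offload_compile(), and bpf_prog_free_deferred() calls bpf_prog_offload_destroy() before bpf_jit_free(). A condensed sketch of that pairing; this is illustrative, not a literal excerpt, and error paths are trimmed:

/* Condensed view of the dev-bound program lifetime added by this diff. */
static void dev_bound_lifetime(struct bpf_prog *fp, int *err)
{
        /* load: host JIT or device offload, never both */
        if (!bpf_prog_is_dev_bound(fp->aux))
                fp = bpf_int_jit_compile(fp);
        else
                *err = bpf_prog_offload_compile(fp);

        /* ... attach, run, detach ... */

        /* deferred free: drop device state before the host image */
        if (bpf_prog_is_dev_bound(fp->aux))
                bpf_prog_offload_destroy(fp);
        bpf_jit_free(fp);
}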