1 // SPDX-License-Identifier: GPL-2.0 2 /* -*- linux-c -*- 3 * sysctl_net_core.c: sysctl interface to net core subsystem. 4 * 5 * Begun April 1, 1996, Mike Shaver. 6 * Added /proc/sys/net/core directory entry (empty =) ). [MS] 7 */ 8 9 #include <linux/filter.h> 10 #include <linux/mm.h> 11 #include <linux/sysctl.h> 12 #include <linux/module.h> 13 #include <linux/socket.h> 14 #include <linux/netdevice.h> 15 #include <linux/ratelimit.h> 16 #include <linux/vmalloc.h> 17 #include <linux/init.h> 18 #include <linux/slab.h> 19 #include <linux/sched/isolation.h> 20 21 #include <net/ip.h> 22 #include <net/sock.h> 23 #include <net/net_ratelimit.h> 24 #include <net/busy_poll.h> 25 #include <net/pkt_sched.h> 26 27 #include "dev.h" 28 29 static int int_3600 = 3600; 30 static int min_sndbuf = SOCK_MIN_SNDBUF; 31 static int min_rcvbuf = SOCK_MIN_RCVBUF; 32 static int max_skb_frags = MAX_SKB_FRAGS; 33 34 static int net_msg_warn; /* Unused, but still a sysctl */ 35 36 int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0; 37 EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net); 38 39 /* 0 - Keep current behavior: 40 * IPv4: inherit all current settings from init_net 41 * IPv6: reset all settings to default 42 * 1 - Both inherit all current settings from init_net 43 * 2 - Both reset all settings to default 44 * 3 - Both inherit all settings from current netns 45 */ 46 int sysctl_devconf_inherit_init_net __read_mostly; 47 EXPORT_SYMBOL(sysctl_devconf_inherit_init_net); 48 49 #if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS) 50 static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos, 51 struct cpumask *mask) 52 { 53 char kbuf[128]; 54 int len; 55 56 if (*ppos || !*lenp) { 57 *lenp = 0; 58 return; 59 } 60 61 len = min(sizeof(kbuf) - 1, *lenp); 62 len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask)); 63 if (!len) { 64 *lenp = 0; 65 return; 66 } 67 68 if (len < *lenp) 69 kbuf[len++] = '\n'; 70 memcpy(buffer, kbuf, len); 71 *lenp = len; 72 *ppos += len; 73 } 
#endif

#ifdef CONFIG_RPS
/* System-wide default RPS cpumask applied to newly created rx queues. */
struct cpumask rps_default_mask;

/* /proc/sys/net/core/rps_default_mask handler.
 * Writes parse a cpumask and run it through rps_cpumask_housekeeping()
 * (declared in dev.h; presumably restricts it to housekeeping CPUs —
 * confirm against its definition).  Reads dump the current mask.
 * rtnl_lock() serializes updates against readers of the mask.
 */
static int rps_default_mask_sysctl(struct ctl_table *table, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int err = 0;

	rtnl_lock();
	if (write) {
		err = cpumask_parse(buffer, &rps_default_mask);
		if (err)
			goto done;

		err = rps_cpumask_housekeeping(&rps_default_mask);
		if (err)
			goto done;
	} else {
		dump_cpumask(buffer, lenp, ppos, &rps_default_mask);
	}

done:
	rtnl_unlock();
	return err;
}

/* /proc/sys/net/core/rps_sock_flow_entries handler.
 * Resizes the global rps_sock_flow_table (accelerated RFS).  The new
 * size is rounded up to a power of two; a replacement table is
 * published via rcu_assign_pointer() and the old one freed after a
 * grace period.  The rps/rfs static branches are bumped or dropped as
 * the table comes into or goes out of existence.
 */
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int orig_size, size;
	int ret, i;
	/* proxy table so proc_dointvec() operates on the local copy */
	struct ctl_table tmp = {
		.data = &size,
		.maxlen = sizeof(size),
		.mode = table->mode
	};
	struct rps_sock_flow_table *orig_sock_table, *sock_table;
	static DEFINE_MUTEX(sock_flow_mutex);

	mutex_lock(&sock_flow_mutex);

	orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
						    lockdep_is_held(&sock_flow_mutex));
	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

	if (write) {
		if (size) {
			if (size > 1<<29) {
				/* Enforce limit to prevent overflow */
				mutex_unlock(&sock_flow_mutex);
				return -EINVAL;
			}
			size = roundup_pow_of_two(size);
			if (size != orig_size) {
				sock_table =
				    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
				if (!sock_table) {
					mutex_unlock(&sock_flow_mutex);
					return -ENOMEM;
				}
				rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
				sock_table->mask = size - 1;
			} else
				sock_table = orig_sock_table;

			/* reinitialize every entry, even when the old
			 * table is being reused at the same size
			 */
			for (i = 0; i < size; i++)
				sock_table->ents[i] = RPS_NO_CPU;
		} else
			sock_table = NULL;

		if (sock_table != orig_sock_table) {
			rcu_assign_pointer(rps_sock_flow_table, sock_table);
			if (sock_table) {
				static_branch_inc(&rps_needed);
				static_branch_inc(&rfs_needed);
			}
			if (orig_sock_table) {
				static_branch_dec(&rps_needed);
				static_branch_dec(&rfs_needed);
				kvfree_rcu(orig_sock_table);
			}
		}
	}

	mutex_unlock(&sock_flow_mutex);

	return ret;
}
#endif /* CONFIG_RPS */

#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);

/* /proc/sys/net/core/flow_limit_cpu_bitmap handler.
 * Writes allocate or free per-cpu sd->flow_limit state so that exactly
 * the CPUs present in the written mask have flow limiting enabled.
 * Reads reconstruct the mask from which CPUs currently carry state.
 */
static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	struct sd_flow_limit *cur;
	struct softnet_data *sd;
	cpumask_var_t mask;
	int i, len, ret = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	if (write) {
		ret = cpumask_parse(buffer, mask);
		if (ret)
			goto done;

		mutex_lock(&flow_limit_update_mutex);
		/* table length is stable under flow_limit_update_mutex */
		len = sizeof(*cur) + netdev_flow_limit_table_len;
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			cur = rcu_dereference_protected(sd->flow_limit,
				     lockdep_is_held(&flow_limit_update_mutex));
			if (cur && !cpumask_test_cpu(i, mask)) {
				/* CPU dropped from the mask: retire state */
				RCU_INIT_POINTER(sd->flow_limit, NULL);
				kfree_rcu(cur);
			} else if (!cur && cpumask_test_cpu(i, mask)) {
				/* CPU added to the mask: allocate state */
				cur = kzalloc_node(len, GFP_KERNEL,
						   cpu_to_node(i));
				if (!cur) {
					/* not unwinding previous changes */
					ret = -ENOMEM;
					goto write_unlock;
				}
				cur->num_buckets = netdev_flow_limit_table_len;
				rcu_assign_pointer(sd->flow_limit, cur);
			}
		}
write_unlock:
		mutex_unlock(&flow_limit_update_mutex);
	} else {
		cpumask_clear(mask);
		rcu_read_lock();
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			if (rcu_dereference(sd->flow_limit))
				cpumask_set_cpu(i, mask);
		}
		rcu_read_unlock();

		dump_cpumask(buffer, lenp, ppos, mask);
	}

done:
	free_cpumask_var(mask);
	return ret;
}

/* /proc/sys/net/core/flow_limit_table_len handler.
 * Accepts only power-of-two values and rolls the value back on invalid
 * input.  The mutex keeps the length stable while
 * flow_limit_cpu_sysctl() sizes new per-cpu tables from it.
 */
static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
				       void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int old, *ptr;
	int ret;

	mutex_lock(&flow_limit_update_mutex);

	ptr = table->data;
	old = *ptr;
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write && !is_power_of_2(*ptr)) {
		*ptr = old;
		ret = -EINVAL;
	}

	mutex_unlock(&flow_limit_update_mutex);
	return ret;
}
#endif /* CONFIG_NET_FLOW_LIMIT */

#ifdef CONFIG_NET_SCHED
/* /proc/sys/net/core/default_qdisc handler: read or set the default
 * qdisc by name, bouncing through a local IFNAMSIZ buffer.
 */
static int set_default_qdisc(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	char id[IFNAMSIZ];
	struct ctl_table tbl = {
		.data = id,
		.maxlen = IFNAMSIZ,
	};
	int ret;

	qdisc_get_default(id, IFNAMSIZ);

	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0)
		ret = qdisc_set_default(id);
	return ret;
}
#endif

/* Shared handler for dev_weight, dev_weight_rx_bias and
 * dev_weight_tx_bias: every successful write recomputes the derived
 * rx/tx budgets.  The local mutex keeps concurrent writers from
 * interleaving the read-modify-write of the three values.
 */
static int proc_do_dev_weight(struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	static DEFINE_MUTEX(dev_weight_mutex);
	int ret, weight;

	mutex_lock(&dev_weight_mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		weight = READ_ONCE(weight_p);
		WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
		WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
	}
	mutex_unlock(&dev_weight_mutex);

	return ret;
}

/* Read-only dump of the RSS key as "%*phC" hex (two digits plus a ':'
 * per byte, hence the *3 sizing).  The incoming ctl_table is ignored;
 * a fake table pointing at the local buffer is handed to
 * proc_dostring(), which is why the entry's .maxlen is irrelevant.
 */
static int proc_do_rss_key(struct ctl_table *table, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	char buf[NETDEV_RSS_KEY_LEN * 3];

	snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);
	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

#ifdef CONFIG_BPF_JIT
/* bpf_jit_enable handler.  Writes require CAP_SYS_ADMIN; the debug
 * value 2 is additionally gated on bpf_dump_raw_ok() and warns loudly.
 */
static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
					   void *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, jit_enable = *(int *)table->data;
	int min = *(int *)table->extra1;
	int max = *(int *)table->extra2;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* validate into a local copy before committing */
	tmp.data = &jit_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (jit_enable < 2 ||
		    (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
			*(int *)table->data = jit_enable;
			if (jit_enable == 2)
				pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
		} else {
			ret = -EPERM;
		}
	}

	/* min == max means CONFIG_BPF_JIT_ALWAYS_ON pinned the range to 1 */
	if (write && ret && min == max)
		pr_info_once("CONFIG_BPF_JIT_ALWAYS_ON is enabled, bpf_jit_enable is permanently set to 1.\n");

	return ret;
}

# ifdef CONFIG_HAVE_EBPF_JIT
/* proc_dointvec_minmax, but readable/writable only with CAP_SYS_ADMIN. */
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
# endif /* CONFIG_HAVE_EBPF_JIT */

/* proc_doulongvec_minmax, but accessible only with CAP_SYS_ADMIN. */
static int
proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/* Global (registered once against init_net) /proc/sys/net/core/ entries. */
static struct ctl_table net_core_table[] = {
	{
		.procname	= "wmem_max",
		.data		= &sysctl_wmem_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_sndbuf,
	},
	{
		.procname	= "rmem_max",
		.data		= &sysctl_rmem_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_rcvbuf,
	},
	{
		.procname	= "wmem_default",
		.data		= &sysctl_wmem_default,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_sndbuf,
	},
	{
		.procname	= "rmem_default",
		.data		= &sysctl_rmem_default,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_rcvbuf,
	},
	{
		.procname	= "dev_weight",
		.data		= &weight_p,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_dev_weight,
	},
	{
		.procname	= "dev_weight_rx_bias",
		.data		= &dev_weight_rx_bias,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_dev_weight,
	},
	{
		.procname	= "dev_weight_tx_bias",
		.data		= &dev_weight_tx_bias,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_dev_weight,
	},
	{
		.procname	= "netdev_max_backlog",
		.data		= &netdev_max_backlog,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		/* .data/.maxlen are unused: proc_do_rss_key formats the
		 * key into its own bounce buffer.
		 */
		.procname	= "netdev_rss_key",
		.data		= &netdev_rss_key,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_rss_key,
	},
#ifdef CONFIG_BPF_JIT
	{
		.procname	= "bpf_jit_enable",
		.data		= &bpf_jit_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax_bpf_enable,
# ifdef CONFIG_BPF_JIT_ALWAYS_ON
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
# else
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
# endif
	},
# ifdef CONFIG_HAVE_EBPF_JIT
	{
		.procname	= "bpf_jit_harden",
		.data		= &bpf_jit_harden,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "bpf_jit_kallsyms",
		.data		= &bpf_jit_kallsyms,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
# endif
	{
		.procname	= "bpf_jit_limit",
		.data		= &bpf_jit_limit,
		.maxlen		= sizeof(long),
		.mode		= 0600,
		.proc_handler	= proc_dolongvec_minmax_bpf_restricted,
		.extra1		= SYSCTL_LONG_ONE,
		.extra2		= &bpf_jit_limit_max,
	},
#endif
	{
		.procname	= "netdev_tstamp_prequeue",
		.data		= &netdev_tstamp_prequeue,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "message_cost",
		.data		= &net_ratelimit_state.interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "message_burst",
		.data		= &net_ratelimit_state.burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "optmem_max",
		.data		= &sysctl_optmem_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "tstamp_allow_data",
		.data		= &sysctl_tstamp_allow_data,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
#ifdef CONFIG_RPS
	{
		/* no .data: rps_sock_flow_sysctl manages the global table */
		.procname	= "rps_sock_flow_entries",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= rps_sock_flow_sysctl
	},
	{
		.procname	= "rps_default_mask",
		.mode		= 0644,
		.proc_handler	= rps_default_mask_sysctl
	},
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	{
		.procname	= "flow_limit_cpu_bitmap",
		.mode		= 0644,
		.proc_handler	= flow_limit_cpu_sysctl
	},
	{
		.procname	= "flow_limit_table_len",
		.data		= &netdev_flow_limit_table_len,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= flow_limit_table_len_sysctl
	},
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_RX_BUSY_POLL
	{
		.procname	= "busy_poll",
		.data		= &sysctl_net_busy_poll,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "busy_read",
		.data		= &sysctl_net_busy_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
#endif
#ifdef CONFIG_NET_SCHED
	{
		.procname	= "default_qdisc",
		.mode		= 0644,
		.maxlen		= IFNAMSIZ,
		.proc_handler	= set_default_qdisc
	},
#endif
	{
		.procname	= "netdev_budget",
		.data		= &netdev_budget,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "warnings",
		.data		= &net_msg_warn,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "max_skb_frags",
		.data		= &sysctl_max_skb_frags,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= &max_skb_frags,
	},
	{
		.procname	= "netdev_budget_usecs",
		.data		= &netdev_budget_usecs,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "fb_tunnels_only_for_init_net",
		.data		= &sysctl_fb_tunnels_only_for_init_net,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "devconf_inherit_init_net",
		.data		= &sysctl_devconf_inherit_init_net,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_THREE,
	},
	{
		.procname	= "high_order_alloc_disable",
		.data		= &net_high_order_alloc_disable_key.key,
		.maxlen		= sizeof(net_high_order_alloc_disable_key),
		.mode		= 0644,
		.proc_handler	= proc_do_static_key,
	},
	{
		.procname	= "gro_normal_batch",
		.data		= &gro_normal_batch,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
	{
		.procname	= "netdev_unregister_timeout_secs",
		.data		= &netdev_unregister_timeout_secs,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= &int_3600,
	},
	{
		.procname	= "skb_defer_max",
		.data		= &sysctl_skb_defer_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{ }
};

/* Template for the per-network-namespace "net/core" entries; .data
 * points into init_net and is rebased per-netns in
 * sysctl_core_net_init().
 */
static struct ctl_table netns_core_table[] = {
	{
		.procname	= "somaxconn",
		.data		= &init_net.core.sysctl_somaxconn,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.extra1		= SYSCTL_ZERO,
		.proc_handler	= proc_dointvec_minmax
	},
	{
		.procname	= "txrehash",
		.data		= &init_net.core.sysctl_txrehash,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
		.proc_handler	= proc_dou8vec_minmax,
	},
	{ }
};

/* Parse the "fb_tunnels=" boot parameter ("initns" or "none"). */
static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str)
{
	/* fallback tunnels for initns only */
	if (!strncmp(str, "initns", 6))
		sysctl_fb_tunnels_only_for_init_net = 1;
	/* no fallback tunnels anywhere */
	else if (!strncmp(str, "none", 4))
		sysctl_fb_tunnels_only_for_init_net = 2;

	return 1;
}
__setup("fb_tunnels=", fb_tunnels_only_for_init_net_sysctl_setup);

/* Register "net/core" for a namespace.  init_net uses the template
 * directly; other namespaces get a kmemdup'd copy with every ->data
 * pointer rebased by the address delta between the new net and
 * init_net.
 */
static __net_init int sysctl_core_net_init(struct net *net)
{
	struct ctl_table *tbl, *tmp;

	tbl = netns_core_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;

		for (tmp = tbl; tmp->procname; tmp++)
			tmp->data += (char *)net - (char *)&init_net;
	}

	net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
	if (net->core.sysctl_hdr == NULL)
		goto err_reg;

	return 0;

err_reg:
	if (tbl != netns_core_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

/* Tear down a namespace's "net/core" registration and free the copy.
 * init_net is never torn down, hence the BUG_ON against freeing the
 * static template.
 */
static __net_exit void sysctl_core_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->core.sysctl_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->core.sysctl_hdr);
	BUG_ON(tbl == netns_core_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_core_ops = {
	.init = sysctl_core_net_init,
	.exit = sysctl_core_net_exit,
};

/* Boot: seed the RPS default mask, register the global table, then the
 * per-namespace operations.
 */
static __init int sysctl_core_init(void)
{
#if IS_ENABLED(CONFIG_RPS)
	cpumask_copy(&rps_default_mask, cpu_none_mask);
#endif

	register_net_sysctl(&init_net, "net/core", net_core_table);
	return register_pernet_subsys(&sysctl_core_ops);
}

fs_initcall(sysctl_core_init);