// SPDX-License-Identifier: GPL-2.0
/* -*- linux-c -*-
 * sysctl_net_core.c: sysctl interface to net core subsystem.
 *
 * Begun April 1, 1996, Mike Shaver.
 * Added /proc/sys/net/core directory entry (empty =) ). [MS]
 */

#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>

#include <net/ip.h>
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
#include <net/hotdata.h>
#include <net/proto_memory.h>
#include <net/rps.h>

#include "dev.h"

static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;

static int net_msg_warn;	/* Unused, but still a sysctl */

int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);

/* 0 - Keep current behavior:
 *     IPv4: inherit all current settings from init_net
 *     IPv6: reset all settings to default
 * 1 - Both inherit all current settings from init_net
 * 2 - Both reset all settings to default
 * 3 - Both inherit all settings from current netns
 */
int sysctl_devconf_inherit_init_net __read_mostly;
EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
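
/* Example (illustrative usage only): to make new network namespaces
 * inherit both the IPv4 and IPv6 devconf settings currently active in
 * init_net (mode 1 above):
 *
 *   # sysctl -w net.core.devconf_inherit_init_net=1
 */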

#if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS)
static int dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
			struct cpumask *mask)
{
	char *kbuf;
	int len;

	if (*ppos || !*lenp) {
		*lenp = 0;
		return 0;
	}

	/* CPUs are displayed as a hex bitmap + a comma between each group of
	 * 8 nibbles (except the last one, which ends with a newline instead).
	 * Guesstimate the buffer size at the group granularity level.
	 */
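	/* e.g. with nr_cpumask_bits == 128 this reserves 4 * 9 = 36 bytes:
	 * 32 hex digits, three commas, and the trailing newline that later
	 * replaces scnprintf's NUL terminator.
	 */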
	len = min(DIV_ROUND_UP(nr_cpumask_bits, 32) * (8 + 1), *lenp);
	kbuf = kmalloc(len, GFP_KERNEL);
	if (!kbuf) {
		*lenp = 0;
		return -ENOMEM;
	}

	len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
	if (!len) {
		*lenp = 0;
		goto free_buf;
	}

	/* scnprintf writes a trailing null char not counted in the returned
	 * length; override it with a newline.
	 */
	kbuf[len++] = '\n';
	memcpy(buffer, kbuf, len);
	*lenp = len;
	*ppos += len;

free_buf:
	kfree(kbuf);
	return 0;
}
#endif

#ifdef CONFIG_RPS

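/* Lazily allocate the per-netns default RPS mask on the first write,
 * copy-on-write style: readers observe either NULL or a fully zeroed,
 * published mask.
 */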
static struct cpumask *rps_default_mask_cow_alloc(struct net *net)
{
	struct cpumask *rps_default_mask;

	if (net->core.rps_default_mask)
		return net->core.rps_default_mask;

	rps_default_mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!rps_default_mask)
		return NULL;

	/* pairs with READ_ONCE in rx_queue_default_mask() */
	WRITE_ONCE(net->core.rps_default_mask, rps_default_mask);
	return rps_default_mask;
}

static int rps_default_mask_sysctl(const struct ctl_table *table, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)table->data;
	int err = 0;

	rtnl_lock();
	if (write) {
		struct cpumask *rps_default_mask = rps_default_mask_cow_alloc(net);

		err = -ENOMEM;
		if (!rps_default_mask)
			goto done;

		err = cpumask_parse(buffer, rps_default_mask);
		if (err)
			goto done;

		err = rps_cpumask_housekeeping(rps_default_mask);
		if (err)
			goto done;
	} else {
		err = dump_cpumask(buffer, lenp, ppos,
				   net->core.rps_default_mask ? : cpu_none_mask);
	}

done:
	rtnl_unlock();
	return err;
}

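/* Resize the global RFS socket flow table: the requested entry count is
 * rounded up to a power of two and a freshly initialized table is swapped
 * in under RCU; writing 0 disables RFS entirely. Example:
 *
 *   # echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 */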
static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int orig_size, size;
	int ret, i;
	struct ctl_table tmp = {
		.data = &size,
		.maxlen = sizeof(size),
		.mode = table->mode
	};
	struct rps_sock_flow_table *orig_sock_table, *sock_table;
	static DEFINE_MUTEX(sock_flow_mutex);

	mutex_lock(&sock_flow_mutex);

	orig_sock_table = rcu_dereference_protected(
					net_hotdata.rps_sock_flow_table,
					lockdep_is_held(&sock_flow_mutex));
	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

	if (write) {
		if (size) {
			if (size > 1<<29) {
				/* Enforce limit to prevent overflow */
				mutex_unlock(&sock_flow_mutex);
				return -EINVAL;
			}
			size = roundup_pow_of_two(size);
			if (size != orig_size) {
				sock_table =
				    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
				if (!sock_table) {
					mutex_unlock(&sock_flow_mutex);
					return -ENOMEM;
				}
				net_hotdata.rps_cpu_mask =
					roundup_pow_of_two(nr_cpu_ids) - 1;
				sock_table->mask = size - 1;
			} else
				sock_table = orig_sock_table;

			for (i = 0; i < size; i++)
				sock_table->ents[i] = RPS_NO_CPU;
		} else
			sock_table = NULL;

		if (sock_table != orig_sock_table) {
			rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
					   sock_table);
			if (sock_table) {
				static_branch_inc(&rps_needed);
				static_branch_inc(&rfs_needed);
			}
			if (orig_sock_table) {
				static_branch_dec(&rps_needed);
				static_branch_dec(&rfs_needed);
				kvfree_rcu_mightsleep(orig_sock_table);
			}
		}
	}

	mutex_unlock(&sock_flow_mutex);

	return ret;
}
#endif /* CONFIG_RPS */

#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);

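/* Enable or disable the softirq flow limiter per CPU. Writing a cpumask
 * allocates a flow-limit table for each newly set CPU and frees (via RCU)
 * the table of each cleared one; reads report which CPUs currently have a
 * table. Example enabling it on CPUs 0-3:
 *
 *   # echo f > /proc/sys/net/core/flow_limit_cpu_bitmap
 */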
static int flow_limit_cpu_sysctl(const struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	struct sd_flow_limit *cur;
	struct softnet_data *sd;
	cpumask_var_t mask;
	int i, len, ret = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	if (write) {
		ret = cpumask_parse(buffer, mask);
		if (ret)
			goto done;

		mutex_lock(&flow_limit_update_mutex);
		len = sizeof(*cur) + netdev_flow_limit_table_len;
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			cur = rcu_dereference_protected(sd->flow_limit,
				     lockdep_is_held(&flow_limit_update_mutex));
			if (cur && !cpumask_test_cpu(i, mask)) {
				RCU_INIT_POINTER(sd->flow_limit, NULL);
				kfree_rcu_mightsleep(cur);
			} else if (!cur && cpumask_test_cpu(i, mask)) {
				cur = kzalloc_node(len, GFP_KERNEL,
						   cpu_to_node(i));
				if (!cur) {
					/* not unwinding previous changes */
					ret = -ENOMEM;
					goto write_unlock;
				}
				cur->num_buckets = netdev_flow_limit_table_len;
				rcu_assign_pointer(sd->flow_limit, cur);
			}
		}
write_unlock:
		mutex_unlock(&flow_limit_update_mutex);
	} else {
		cpumask_clear(mask);
		rcu_read_lock();
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			if (rcu_dereference(sd->flow_limit))
				cpumask_set_cpu(i, mask);
		}
		rcu_read_unlock();

		ret = dump_cpumask(buffer, lenp, ppos, mask);
	}

done:
	free_cpumask_var(mask);
	return ret;
}

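/* The flow limit table length must remain a power of two; any other value
 * is rejected and the previous length is restored. (Presumably so a flow
 * hash can be reduced to a bucket index with a simple mask.)
 */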
static int flow_limit_table_len_sysctl(const struct ctl_table *table, int write,
				       void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int old, *ptr;
	int ret;

	mutex_lock(&flow_limit_update_mutex);

	ptr = table->data;
	old = *ptr;
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write && !is_power_of_2(*ptr)) {
		*ptr = old;
		ret = -EINVAL;
	}

	mutex_unlock(&flow_limit_update_mutex);
	return ret;
}
#endif /* CONFIG_NET_FLOW_LIMIT */

#ifdef CONFIG_NET_SCHED
static int set_default_qdisc(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	char id[IFNAMSIZ];
	struct ctl_table tbl = {
		.data = id,
		.maxlen = IFNAMSIZ,
	};
	int ret;

	qdisc_get_default(id, IFNAMSIZ);

	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0)
		ret = qdisc_set_default(id);
	return ret;
}
#endif

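/* dev_rx_weight and dev_tx_weight are products of dev_weight and their
 * respective bias factors; recompute both under a local mutex whenever any
 * of the three inputs is written, so concurrent writers cannot interleave
 * and leave stale products behind.
 */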
static int proc_do_dev_weight(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	static DEFINE_MUTEX(dev_weight_mutex);
	int ret, weight;

	mutex_lock(&dev_weight_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		weight = READ_ONCE(weight_p);
		WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
		WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);
	}
	mutex_unlock(&dev_weight_mutex);

	return ret;
}

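/* Render the boot-time RSS key as colon-separated hex ("%*phC"): each of
 * the NETDEV_RSS_KEY_LEN bytes takes two hex digits plus a separator, with
 * the final separator slot holding the terminating NUL, hence the three
 * bytes of buffer per key byte.
 */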
static int proc_do_rss_key(const struct ctl_table *table, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	char buf[NETDEV_RSS_KEY_LEN * 3];

	snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);
	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

#ifdef CONFIG_BPF_JIT
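/* bpf_jit_enable: 0 disables the JIT, 1 enables it, 2 additionally dumps
 * the generated images to the kernel log for debugging. Raising the value
 * to 2 is gated on bpf_dump_raw_ok() since the dump exposes kernel
 * addresses; with CONFIG_BPF_JIT_ALWAYS_ON the value is pinned to 1
 * (extra1 == extra2 in the table entry below).
 */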
static int proc_dointvec_minmax_bpf_enable(const struct ctl_table *table, int write,
					   void *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, jit_enable = *(int *)table->data;
	int min = *(int *)table->extra1;
	int max = *(int *)table->extra2;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &jit_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (jit_enable < 2 ||
		    (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
			*(int *)table->data = jit_enable;
			if (jit_enable == 2)
				pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
		} else {
			ret = -EPERM;
		}
	}

	if (write && ret && min == max)
		pr_info_once("CONFIG_BPF_JIT_ALWAYS_ON is enabled, bpf_jit_enable is permanently set to 1.\n");

	return ret;
}

# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
# endif /* CONFIG_HAVE_EBPF_JIT */

static int
proc_dolongvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static struct ctl_table net_core_table[] = {
	{
		.procname = "mem_pcpu_rsv",
		.data = &net_hotdata.sysctl_mem_pcpu_rsv,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_mem_pcpu_rsv,
	},
	{
		.procname = "dev_weight",
		.data = &weight_p,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_dev_weight,
		.extra1 = SYSCTL_ONE,
	},
	{
		.procname = "dev_weight_rx_bias",
		.data = &dev_weight_rx_bias,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_dev_weight,
		.extra1 = SYSCTL_ONE,
	},
	{
		.procname = "dev_weight_tx_bias",
		.data = &dev_weight_tx_bias,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_dev_weight,
		.extra1 = SYSCTL_ONE,
	},
	{
		.procname = "netdev_max_backlog",
		.data = &net_hotdata.max_backlog,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "netdev_rss_key",
		.data = &netdev_rss_key,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_do_rss_key,
	},
#ifdef CONFIG_BPF_JIT
	{
		.procname = "bpf_jit_enable",
		.data = &bpf_jit_enable,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax_bpf_enable,
# ifdef CONFIG_BPF_JIT_ALWAYS_ON
		.extra1 = SYSCTL_ONE,
		.extra2 = SYSCTL_ONE,
# else
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_TWO,
# endif
	},
# ifdef CONFIG_HAVE_EBPF_JIT
	{
		.procname = "bpf_jit_harden",
		.data = &bpf_jit_harden,
		.maxlen = sizeof(int),
		.mode = 0600,
		.proc_handler = proc_dointvec_minmax_bpf_restricted,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_TWO,
	},
	{
		.procname = "bpf_jit_kallsyms",
		.data = &bpf_jit_kallsyms,
		.maxlen = sizeof(int),
		.mode = 0600,
		.proc_handler = proc_dointvec_minmax_bpf_restricted,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
# endif
	{
		.procname = "bpf_jit_limit",
		.data = &bpf_jit_limit,
		.maxlen = sizeof(long),
		.mode = 0600,
		.proc_handler = proc_dolongvec_minmax_bpf_restricted,
		.extra1 = SYSCTL_LONG_ONE,
		.extra2 = &bpf_jit_limit_max,
	},
#endif
	{
		.procname = "netdev_tstamp_prequeue",
		.data = &net_hotdata.tstamp_prequeue,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "message_cost",
		.data = &net_ratelimit_state.interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "message_burst",
		.data = &net_ratelimit_state.burst,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#ifdef CONFIG_RPS
	{
		.procname = "rps_sock_flow_entries",
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = rps_sock_flow_sysctl
	},
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	{
		.procname = "flow_limit_cpu_bitmap",
		.mode = 0644,
		.proc_handler = flow_limit_cpu_sysctl
	},
	{
		.procname = "flow_limit_table_len",
		.data = &netdev_flow_limit_table_len,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = flow_limit_table_len_sysctl
	},
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_RX_BUSY_POLL
	{
		.procname = "busy_poll",
		.data = &sysctl_net_busy_poll,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
	},
	{
		.procname = "busy_read",
		.data = &sysctl_net_busy_read,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
	},
#endif
#ifdef CONFIG_NET_SCHED
	{
		.procname = "default_qdisc",
		.mode = 0644,
		.maxlen = IFNAMSIZ,
		.proc_handler = set_default_qdisc
	},
#endif
	{
		.procname = "netdev_budget",
		.data = &net_hotdata.netdev_budget,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "warnings",
		.data = &net_msg_warn,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "max_skb_frags",
		.data = &net_hotdata.sysctl_max_skb_frags,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ONE,
		.extra2 = &max_skb_frags,
	},
	{
		.procname = "netdev_budget_usecs",
		.data = &net_hotdata.netdev_budget_usecs,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &netdev_budget_usecs_min,
	},
	{
		.procname = "fb_tunnels_only_for_init_net",
		.data = &sysctl_fb_tunnels_only_for_init_net,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_TWO,
	},
	{
		.procname = "devconf_inherit_init_net",
		.data = &sysctl_devconf_inherit_init_net,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_THREE,
	},
	{
		.procname = "high_order_alloc_disable",
		.data = &net_high_order_alloc_disable_key.key,
		.maxlen = sizeof(net_high_order_alloc_disable_key),
		.mode = 0644,
		.proc_handler = proc_do_static_key,
	},
	{
		.procname = "gro_normal_batch",
		.data = &net_hotdata.gro_normal_batch,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ONE,
	},
	{
		.procname = "netdev_unregister_timeout_secs",
		.data = &netdev_unregister_timeout_secs,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ONE,
		.extra2 = &int_3600,
	},
	{
		.procname = "skb_defer_max",
		.data = &net_hotdata.sysctl_skb_defer_max,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
	},
};

static struct ctl_table netns_core_table[] = {
#if IS_ENABLED(CONFIG_RPS)
	{
		.procname = "rps_default_mask",
		.data = &init_net,
		.mode = 0644,
		.proc_handler = rps_default_mask_sysctl
	},
#endif
	{
		.procname = "somaxconn",
		.data = &init_net.core.sysctl_somaxconn,
		.maxlen = sizeof(int),
		.mode = 0644,
		.extra1 = SYSCTL_ZERO,
		.proc_handler = proc_dointvec_minmax
	},
	{
		.procname = "optmem_max",
		.data = &init_net.core.sysctl_optmem_max,
		.maxlen = sizeof(int),
		.mode = 0644,
		.extra1 = SYSCTL_ZERO,
		.proc_handler = proc_dointvec_minmax
	},
	{
		.procname = "txrehash",
		.data = &init_net.core.sysctl_txrehash,
		.maxlen = sizeof(u8),
		.mode = 0644,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
		.proc_handler = proc_dou8vec_minmax,
	},
	{
		.procname = "tstamp_allow_data",
		.data = &init_net.core.sysctl_tstamp_allow_data,
		.maxlen = sizeof(u8),
		.mode = 0644,
		.proc_handler = proc_dou8vec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE
	},
	/* sysctl_core_net_init() marks the entries from this point on
	 * read-only in non-init network namespaces.
	 */
	{
		.procname = "wmem_max",
		.data = &sysctl_wmem_max,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_sndbuf,
	},
	{
		.procname = "rmem_max",
		.data = &sysctl_rmem_max,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_rcvbuf,
	},
	{
		.procname = "wmem_default",
		.data = &sysctl_wmem_default,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_sndbuf,
	},
	{
		.procname = "rmem_default",
		.data = &sysctl_rmem_default,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_rcvbuf,
	},
};

static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str)
{
	/* fallback tunnels for initns only */
	if (!strncmp(str, "initns", 6))
		sysctl_fb_tunnels_only_for_init_net = 1;
	/* no fallback tunnels anywhere */
	else if (!strncmp(str, "none", 4))
		sysctl_fb_tunnels_only_for_init_net = 2;

	return 1;
}
__setup("fb_tunnels=", fb_tunnels_only_for_init_net_sysctl_setup);

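/* Per-netns registration: for non-init namespaces, duplicate the template
 * table and rebase each .data pointer from init_net to the new netns by
 * adding the delta between the two struct net addresses. Entries from
 * "wmem_max" onward point at truly global state, so their pointers are
 * left alone and their write bits are stripped instead.
 */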
static __net_init int sysctl_core_net_init(struct net *net)
{
	size_t table_size = ARRAY_SIZE(netns_core_table);
	struct ctl_table *tbl;

	tbl = netns_core_table;
	if (!net_eq(net, &init_net)) {
		int i;

		tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;

		for (i = 0; i < table_size; ++i) {
			if (tbl[i].data == &sysctl_wmem_max)
				break;

			tbl[i].data += (char *)net - (char *)&init_net;
		}
		for (; i < table_size; ++i)
			tbl[i].mode &= ~0222;
	}

	net->core.sysctl_hdr = register_net_sysctl_sz(net, "net/core", tbl, table_size);
	if (net->core.sysctl_hdr == NULL)
		goto err_reg;

	return 0;

err_reg:
	if (tbl != netns_core_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_core_net_exit(struct net *net)
{
	const struct ctl_table *tbl;

	tbl = net->core.sysctl_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->core.sysctl_hdr);
	BUG_ON(tbl == netns_core_table);
#if IS_ENABLED(CONFIG_RPS)
	kfree(net->core.rps_default_mask);
#endif
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_core_ops = {
	.init = sysctl_core_net_init,
	.exit = sysctl_core_net_exit,
};

static __init int sysctl_core_init(void)
{
	register_net_sysctl(&init_net, "net/core", net_core_table);
	return register_pernet_subsys(&sysctl_core_ops);
}

fs_initcall(sysctl_core_init);