xref: /linux/security/security.c (revision aeca4e2ca65c1aeacfbe520684e6421719d99417)
1 /*
2  * Security plug functions
3  *
4  * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
5  * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
6  * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
7  * Copyright (C) 2016 Mellanox Technologies
8  *
9  *	This program is free software; you can redistribute it and/or modify
10  *	it under the terms of the GNU General Public License as published by
11  *	the Free Software Foundation; either version 2 of the License, or
12  *	(at your option) any later version.
13  */
14 
15 #define pr_fmt(fmt) "LSM: " fmt
16 
17 #include <linux/bpf.h>
18 #include <linux/capability.h>
19 #include <linux/dcache.h>
20 #include <linux/export.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/lsm_hooks.h>
24 #include <linux/integrity.h>
25 #include <linux/ima.h>
26 #include <linux/evm.h>
27 #include <linux/fsnotify.h>
28 #include <linux/mman.h>
29 #include <linux/mount.h>
30 #include <linux/personality.h>
31 #include <linux/backing-dev.h>
32 #include <linux/string.h>
33 #include <linux/msg.h>
34 #include <net/flow.h>
35 
36 #define MAX_LSM_EVM_XATTR	2
37 
38 /* How many LSMs were built into the kernel? */
39 #define LSM_COUNT (__end_lsm_info - __start_lsm_info)
40 
41 struct security_hook_heads security_hook_heads __lsm_ro_after_init;
42 static ATOMIC_NOTIFIER_HEAD(lsm_notifier_chain);
43 
44 static struct kmem_cache *lsm_file_cache;
45 static struct kmem_cache *lsm_inode_cache;
46 
47 char *lsm_names;
48 static struct lsm_blob_sizes blob_sizes __lsm_ro_after_init;
49 
50 /* Boot-time LSM user choice */
51 static __initdata const char *chosen_lsm_order;
52 static __initdata const char *chosen_major_lsm;
53 
54 static __initconst const char * const builtin_lsm_order = CONFIG_LSM;
55 
56 /* Ordered list of LSMs to initialize. */
57 static __initdata struct lsm_info **ordered_lsms;
58 static __initdata struct lsm_info *exclusive;
59 
60 static __initdata bool debug;
61 #define init_debug(...)						\
62 	do {							\
63 		if (debug)					\
64 			pr_info(__VA_ARGS__);			\
65 	} while (0)
66 
67 static bool __init is_enabled(struct lsm_info *lsm)
68 {
69 	if (!lsm->enabled)
70 		return false;
71 
72 	return *lsm->enabled;
73 }
74 
75 /* Mark an LSM's enabled flag. */
76 static int lsm_enabled_true __initdata = 1;
77 static int lsm_enabled_false __initdata = 0;
78 static void __init set_enabled(struct lsm_info *lsm, bool enabled)
79 {
80 	/*
81 	 * When an LSM hasn't configured an enable variable, we can use
82 	 * a hard-coded location for storing the default enabled state.
83 	 */
84 	if (!lsm->enabled) {
85 		if (enabled)
86 			lsm->enabled = &lsm_enabled_true;
87 		else
88 			lsm->enabled = &lsm_enabled_false;
89 	} else if (lsm->enabled == &lsm_enabled_true) {
90 		if (!enabled)
91 			lsm->enabled = &lsm_enabled_false;
92 	} else if (lsm->enabled == &lsm_enabled_false) {
93 		if (enabled)
94 			lsm->enabled = &lsm_enabled_true;
95 	} else {
96 		*lsm->enabled = enabled;
97 	}
98 }
99 
100 /* Is an LSM already listed in the ordered LSMs list? */
101 static bool __init exists_ordered_lsm(struct lsm_info *lsm)
102 {
103 	struct lsm_info **check;
104 
105 	for (check = ordered_lsms; *check; check++)
106 		if (*check == lsm)
107 			return true;
108 
109 	return false;
110 }
111 
112 /* Append an LSM to the list of ordered LSMs to initialize. */
113 static int last_lsm __initdata;
114 static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
115 {
116 	/* Ignore duplicate selections. */
117 	if (exists_ordered_lsm(lsm))
118 		return;
119 
120 	if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
121 		return;
122 
123 	/* Enable this LSM, if its enable state has not already been set. */
124 	if (!lsm->enabled)
125 		lsm->enabled = &lsm_enabled_true;
126 	ordered_lsms[last_lsm++] = lsm;
127 
128 	init_debug("%s ordering: %s (%sabled)\n", from, lsm->name,
129 		   is_enabled(lsm) ? "en" : "dis");
130 }
131 
132 /* Is an LSM allowed to be initialized? */
133 static bool __init lsm_allowed(struct lsm_info *lsm)
134 {
135 	/* Skip if the LSM is disabled. */
136 	if (!is_enabled(lsm))
137 		return false;
138 
139 	/* Not allowed if another exclusive LSM already initialized. */
140 	if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
141 		init_debug("exclusive disabled: %s\n", lsm->name);
142 		return false;
143 	}
144 
145 	return true;
146 }
147 
148 static void __init lsm_set_blob_size(int *need, int *lbs)
149 {
150 	int offset;
151 
152 	if (*need > 0) {
153 		offset = *lbs;
154 		*lbs += *need;
155 		*need = offset;
156 	}
157 }
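/*
 * A worked example of the arithmetic above (sizes are hypothetical):
 * if the first LSM prepared asks for 4 bytes of cred blob and the
 * second asks for 8, the requested sizes are rewritten into offsets:
 *
 *	first:  *need = 4  ->  *need becomes 0, lbs_cred total = 4
 *	second: *need = 8  ->  *need becomes 4, lbs_cred total = 12
 *
 * Each module later finds its data at cred->security plus its offset.
 */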
158 
159 static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
160 {
161 	if (!needed)
162 		return;
163 
164 	lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
165 	lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
166 	/*
167 	 * The inode blob gets an rcu_head in addition to
168 	 * what the modules might need.
169 	 */
170 	if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
171 		blob_sizes.lbs_inode = sizeof(struct rcu_head);
172 	lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
173 	lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
174 	lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
175 	lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
176 }
177 
178 /* Prepare LSM for initialization. */
179 static void __init prepare_lsm(struct lsm_info *lsm)
180 {
181 	int enabled = lsm_allowed(lsm);
182 
183 	/* Record enablement (to handle any following exclusive LSMs). */
184 	set_enabled(lsm, enabled);
185 
186 	/* If enabled, do pre-initialization work. */
187 	if (enabled) {
188 		if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
189 			exclusive = lsm;
190 			init_debug("exclusive chosen: %s\n", lsm->name);
191 		}
192 
193 		lsm_set_blob_sizes(lsm->blobs);
194 	}
195 }
196 
197 /* Initialize a given LSM, if it is enabled. */
198 static void __init initialize_lsm(struct lsm_info *lsm)
199 {
200 	if (is_enabled(lsm)) {
201 		int ret;
202 
203 		init_debug("initializing %s\n", lsm->name);
204 		ret = lsm->init();
205 		WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
206 	}
207 }
208 
209 /* Populate ordered LSMs list from comma-separated LSM name list. */
210 static void __init ordered_lsm_parse(const char *order, const char *origin)
211 {
212 	struct lsm_info *lsm;
213 	char *sep, *name, *next;
214 
215 	/* LSM_ORDER_FIRST is always first. */
216 	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
217 		if (lsm->order == LSM_ORDER_FIRST)
218 			append_ordered_lsm(lsm, "first");
219 	}
220 
221 	/* Process "security=", if given. */
222 	if (chosen_major_lsm) {
223 		struct lsm_info *major;
224 
225 		/*
226 		 * To match the original "security=" behavior, this
227 		 * explicitly does NOT fall back to another Legacy Major
228 		 * if the selected one was separately disabled: disable
229 		 * all non-matching Legacy Major LSMs.
230 		 */
231 		for (major = __start_lsm_info; major < __end_lsm_info;
232 		     major++) {
233 			if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
234 			    strcmp(major->name, chosen_major_lsm) != 0) {
235 				set_enabled(major, false);
236 				init_debug("security=%s disabled: %s\n",
237 					   chosen_major_lsm, major->name);
238 			}
239 		}
240 	}
241 
242 	sep = kstrdup(order, GFP_KERNEL);
243 	next = sep;
244 	/* Walk the list, looking for matching LSMs. */
245 	while ((name = strsep(&next, ",")) != NULL) {
246 		bool found = false;
247 
248 		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
249 			if (lsm->order == LSM_ORDER_MUTABLE &&
250 			    strcmp(lsm->name, name) == 0) {
251 				append_ordered_lsm(lsm, origin);
252 				found = true;
253 			}
254 		}
255 
256 		if (!found)
257 			init_debug("%s ignored: %s\n", origin, name);
258 	}
259 
260 	/* Append the "security=" choice, if not already in the ordered list. */
261 	if (chosen_major_lsm) {
262 		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
263 			if (exists_ordered_lsm(lsm))
264 				continue;
265 			if (strcmp(lsm->name, chosen_major_lsm) == 0)
266 				append_ordered_lsm(lsm, "security=");
267 		}
268 	}
269 
270 	/* Disable all LSMs not in the ordered list. */
271 	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
272 		if (exists_ordered_lsm(lsm))
273 			continue;
274 		set_enabled(lsm, false);
275 		init_debug("%s disabled: %s\n", origin, lsm->name);
276 	}
277 
278 	kfree(sep);
279 }
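/*
 * An illustrative run of the parser above (module names are only
 * examples): with "lsm=yama,loadpin,selinux" each LSM_ORDER_MUTABLE
 * module named in the list is appended in the order given, names that
 * were not built in are reported as ignored, a "security=" choice not
 * already present is appended, and every remaining built-in LSM is
 * disabled.
 */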
280 
281 static void __init lsm_early_cred(struct cred *cred);
282 static void __init lsm_early_task(struct task_struct *task);
283 
284 static void __init ordered_lsm_init(void)
285 {
286 	struct lsm_info **lsm;
287 
288 	ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
289 				GFP_KERNEL);
290 
291 	if (chosen_lsm_order)
292 		ordered_lsm_parse(chosen_lsm_order, "cmdline");
293 	else
294 		ordered_lsm_parse(builtin_lsm_order, "builtin");
295 
296 	for (lsm = ordered_lsms; *lsm; lsm++)
297 		prepare_lsm(*lsm);
298 
299 	init_debug("cred blob size     = %d\n", blob_sizes.lbs_cred);
300 	init_debug("file blob size     = %d\n", blob_sizes.lbs_file);
301 	init_debug("inode blob size    = %d\n", blob_sizes.lbs_inode);
302 	init_debug("ipc blob size      = %d\n", blob_sizes.lbs_ipc);
303 	init_debug("msg_msg blob size  = %d\n", blob_sizes.lbs_msg_msg);
304 	init_debug("task blob size     = %d\n", blob_sizes.lbs_task);
305 
306 	/*
307 	 * Create any kmem_caches needed for blobs
308 	 */
309 	if (blob_sizes.lbs_file)
310 		lsm_file_cache = kmem_cache_create("lsm_file_cache",
311 						   blob_sizes.lbs_file, 0,
312 						   SLAB_PANIC, NULL);
313 	if (blob_sizes.lbs_inode)
314 		lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
315 						    blob_sizes.lbs_inode, 0,
316 						    SLAB_PANIC, NULL);
317 
318 	lsm_early_cred((struct cred *) current->cred);
319 	lsm_early_task(current);
320 	for (lsm = ordered_lsms; *lsm; lsm++)
321 		initialize_lsm(*lsm);
322 
323 	kfree(ordered_lsms);
324 }
325 
326 /**
327  * security_init - initializes the security framework
328  *
329  * This should be called early in the kernel initialization sequence.
330  */
331 int __init security_init(void)
332 {
333 	int i;
334 	struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
335 
336 	pr_info("Security Framework initializing\n");
337 
338 	for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
339 	     i++)
340 		INIT_HLIST_HEAD(&list[i]);
341 
342 	/* Load LSMs in specified order. */
343 	ordered_lsm_init();
344 
345 	return 0;
346 }
347 
348 /* Save user chosen LSM */
349 static int __init choose_major_lsm(char *str)
350 {
351 	chosen_major_lsm = str;
352 	return 1;
353 }
354 __setup("security=", choose_major_lsm);
355 
356 /* Explicitly choose LSM initialization order. */
357 static int __init choose_lsm_order(char *str)
358 {
359 	chosen_lsm_order = str;
360 	return 1;
361 }
362 __setup("lsm=", choose_lsm_order);
363 
364 /* Enable LSM order debugging. */
365 static int __init enable_debug(char *str)
366 {
367 	debug = true;
368 	return 1;
369 }
370 __setup("lsm.debug", enable_debug);
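/*
 * An example boot command line (illustrative only) combining the three
 * parameters handled above:
 *
 *	security=selinux lsm=capability,yama,selinux lsm.debug
 *
 * "security=" selects a legacy major LSM, "lsm=" supplies the explicit
 * initialization order, and "lsm.debug" enables the init_debug()
 * messages printed while the order is built.
 */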
371 
372 static bool match_last_lsm(const char *list, const char *lsm)
373 {
374 	const char *last;
375 
376 	if (WARN_ON(!list || !lsm))
377 		return false;
378 	last = strrchr(list, ',');
379 	if (last)
380 		/* Pass the comma, strcmp() will check for '\0' */
381 		last++;
382 	else
383 		last = list;
384 	return !strcmp(last, lsm);
385 }
386 
387 static int lsm_append(char *new, char **result)
388 {
389 	char *cp;
390 
391 	if (*result == NULL) {
392 		*result = kstrdup(new, GFP_KERNEL);
393 		if (*result == NULL)
394 			return -ENOMEM;
395 	} else {
396 		/* Check if it is the last registered name */
397 		if (match_last_lsm(*result, new))
398 			return 0;
399 		cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
400 		if (cp == NULL)
401 			return -ENOMEM;
402 		kfree(*result);
403 		*result = cp;
404 	}
405 	return 0;
406 }
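/*
 * For example, lsm_names grows from "capability" to
 * "capability,selinux" as modules register; a repeated registration by
 * the most recently added module is a no-op thanks to the
 * match_last_lsm() check above.
 */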
407 
408 /**
409  * security_add_hooks - Add a module's hooks to the hook lists.
410  * @hooks: the hooks to add
411  * @count: the number of hooks to add
412  * @lsm: the name of the security module
413  *
414  * Each LSM has to register its hooks with the infrastructure.
415  */
416 void __init security_add_hooks(struct security_hook_list *hooks, int count,
417 				char *lsm)
418 {
419 	int i;
420 
421 	for (i = 0; i < count; i++) {
422 		hooks[i].lsm = lsm;
423 		hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
424 	}
425 	if (lsm_append(lsm, &lsm_names) < 0)
426 		panic("%s - Cannot get early memory.\n", __func__);
427 }
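/*
 * A minimal registration sketch (the "example" module and its hook
 * functions are placeholders, not part of this file):
 *
 *	static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
 *		LSM_HOOK_INIT(file_open, example_file_open),
 *		LSM_HOOK_INIT(task_alloc, example_task_alloc),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
 *				   "example");
 *		return 0;
 *	}
 */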
428 
429 int call_lsm_notifier(enum lsm_event event, void *data)
430 {
431 	return atomic_notifier_call_chain(&lsm_notifier_chain, event, data);
432 }
433 EXPORT_SYMBOL(call_lsm_notifier);
434 
435 int register_lsm_notifier(struct notifier_block *nb)
436 {
437 	return atomic_notifier_chain_register(&lsm_notifier_chain, nb);
438 }
439 EXPORT_SYMBOL(register_lsm_notifier);
440 
441 int unregister_lsm_notifier(struct notifier_block *nb)
442 {
443 	return atomic_notifier_chain_unregister(&lsm_notifier_chain, nb);
444 }
445 EXPORT_SYMBOL(unregister_lsm_notifier);
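/*
 * A sketch of a notifier consumer (names are illustrative): register a
 * notifier_block whose callback receives the lsm_event and data passed
 * to call_lsm_notifier().
 *
 *	static int example_lsm_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		if (event == LSM_POLICY_CHANGE)
 *			pr_info("LSM policy changed\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_lsm_event,
 *	};
 *
 *	register_lsm_notifier(&example_nb);
 */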
446 
447 /**
448  * lsm_cred_alloc - allocate a composite cred blob
449  * @cred: the cred that needs a blob
450  * @gfp: allocation type
451  *
452  * Allocate the cred blob for all the modules
453  *
454  * Returns 0, or -ENOMEM if memory can't be allocated.
455  */
456 static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
457 {
458 	if (blob_sizes.lbs_cred == 0) {
459 		cred->security = NULL;
460 		return 0;
461 	}
462 
463 	cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
464 	if (cred->security == NULL)
465 		return -ENOMEM;
466 	return 0;
467 }
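/*
 * A sketch (the "example" LSM is hypothetical) of how a module reaches
 * its slice of the composite blob allocated here: lsm_set_blob_sizes()
 * rewrote the size the module requested into an offset, so the module
 * adds that offset to the shared cred->security pointer.
 *
 *	static inline struct example_cred *example_cred(const struct cred *cred)
 *	{
 *		return cred->security + example_blob_sizes.lbs_cred;
 *	}
 */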
468 
469 /**
470  * lsm_early_cred - during initialization allocate a composite cred blob
471  * @cred: the cred that needs a blob
472  *
473  * Allocate the cred blob for all the modules
474  */
475 static void __init lsm_early_cred(struct cred *cred)
476 {
477 	int rc = lsm_cred_alloc(cred, GFP_KERNEL);
478 
479 	if (rc)
480 		panic("%s: Early cred alloc failed.\n", __func__);
481 }
482 
483 /**
484  * lsm_file_alloc - allocate a composite file blob
485  * @file: the file that needs a blob
486  *
487  * Allocate the file blob for all the modules
488  *
489  * Returns 0, or -ENOMEM if memory can't be allocated.
490  */
491 static int lsm_file_alloc(struct file *file)
492 {
493 	if (!lsm_file_cache) {
494 		file->f_security = NULL;
495 		return 0;
496 	}
497 
498 	file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
499 	if (file->f_security == NULL)
500 		return -ENOMEM;
501 	return 0;
502 }
503 
504 /**
505  * lsm_inode_alloc - allocate a composite inode blob
506  * @inode: the inode that needs a blob
507  *
508  * Allocate the inode blob for all the modules
509  *
510  * Returns 0, or -ENOMEM if memory can't be allocated.
511  */
512 int lsm_inode_alloc(struct inode *inode)
513 {
514 	if (!lsm_inode_cache) {
515 		inode->i_security = NULL;
516 		return 0;
517 	}
518 
519 	inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
520 	if (inode->i_security == NULL)
521 		return -ENOMEM;
522 	return 0;
523 }
524 
525 /**
526  * lsm_task_alloc - allocate a composite task blob
527  * @task: the task that needs a blob
528  *
529  * Allocate the task blob for all the modules
530  *
531  * Returns 0, or -ENOMEM if memory can't be allocated.
532  */
533 static int lsm_task_alloc(struct task_struct *task)
534 {
535 	if (blob_sizes.lbs_task == 0) {
536 		task->security = NULL;
537 		return 0;
538 	}
539 
540 	task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
541 	if (task->security == NULL)
542 		return -ENOMEM;
543 	return 0;
544 }
545 
546 /**
547  * lsm_ipc_alloc - allocate a composite ipc blob
548  * @kip: the ipc that needs a blob
549  *
550  * Allocate the ipc blob for all the modules
551  *
552  * Returns 0, or -ENOMEM if memory can't be allocated.
553  */
554 static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
555 {
556 	if (blob_sizes.lbs_ipc == 0) {
557 		kip->security = NULL;
558 		return 0;
559 	}
560 
561 	kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
562 	if (kip->security == NULL)
563 		return -ENOMEM;
564 	return 0;
565 }
566 
567 /**
568  * lsm_msg_msg_alloc - allocate a composite msg_msg blob
569  * @mp: the msg_msg that needs a blob
570  *
571  * Allocate the msg_msg blob for all the modules
572  *
573  * Returns 0, or -ENOMEM if memory can't be allocated.
574  */
575 static int lsm_msg_msg_alloc(struct msg_msg *mp)
576 {
577 	if (blob_sizes.lbs_msg_msg == 0) {
578 		mp->security = NULL;
579 		return 0;
580 	}
581 
582 	mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
583 	if (mp->security == NULL)
584 		return -ENOMEM;
585 	return 0;
586 }
587 
588 /**
589  * lsm_early_task - during initialization allocate a composite task blob
590  * @task: the task that needs a blob
591  *
592  * Allocate the task blob for all the modules
593  */
594 static void __init lsm_early_task(struct task_struct *task)
595 {
596 	int rc = lsm_task_alloc(task);
597 
598 	if (rc)
599 		panic("%s: Early task alloc failed.\n", __func__);
600 }
601 
602 /*
603  * Hook list operation macros.
604  *
605  * call_void_hook:
606  *	Call every registered hook; no value is returned.
607  *
608  * call_int_hook:
609  *	Call each registered hook until one returns non-zero; default to IRC.
610  */
611 
612 #define call_void_hook(FUNC, ...)				\
613 	do {							\
614 		struct security_hook_list *P;			\
615 								\
616 		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
617 			P->hook.FUNC(__VA_ARGS__);		\
618 	} while (0)
619 
620 #define call_int_hook(FUNC, IRC, ...) ({			\
621 	int RC = IRC;						\
622 	do {							\
623 		struct security_hook_list *P;			\
624 								\
625 		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
626 			RC = P->hook.FUNC(__VA_ARGS__);		\
627 			if (RC != 0)				\
628 				break;				\
629 		}						\
630 	} while (0);						\
631 	RC;							\
632 })
633 
634 /* Security operations */
635 
636 int security_binder_set_context_mgr(struct task_struct *mgr)
637 {
638 	return call_int_hook(binder_set_context_mgr, 0, mgr);
639 }
640 
641 int security_binder_transaction(struct task_struct *from,
642 				struct task_struct *to)
643 {
644 	return call_int_hook(binder_transaction, 0, from, to);
645 }
646 
647 int security_binder_transfer_binder(struct task_struct *from,
648 				    struct task_struct *to)
649 {
650 	return call_int_hook(binder_transfer_binder, 0, from, to);
651 }
652 
653 int security_binder_transfer_file(struct task_struct *from,
654 				  struct task_struct *to, struct file *file)
655 {
656 	return call_int_hook(binder_transfer_file, 0, from, to, file);
657 }
658 
659 int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
660 {
661 	return call_int_hook(ptrace_access_check, 0, child, mode);
662 }
663 
664 int security_ptrace_traceme(struct task_struct *parent)
665 {
666 	return call_int_hook(ptrace_traceme, 0, parent);
667 }
668 
669 int security_capget(struct task_struct *target,
670 		     kernel_cap_t *effective,
671 		     kernel_cap_t *inheritable,
672 		     kernel_cap_t *permitted)
673 {
674 	return call_int_hook(capget, 0, target,
675 				effective, inheritable, permitted);
676 }
677 
678 int security_capset(struct cred *new, const struct cred *old,
679 		    const kernel_cap_t *effective,
680 		    const kernel_cap_t *inheritable,
681 		    const kernel_cap_t *permitted)
682 {
683 	return call_int_hook(capset, 0, new, old,
684 				effective, inheritable, permitted);
685 }
686 
687 int security_capable(const struct cred *cred,
688 		     struct user_namespace *ns,
689 		     int cap,
690 		     unsigned int opts)
691 {
692 	return call_int_hook(capable, 0, cred, ns, cap, opts);
693 }
694 
695 int security_quotactl(int cmds, int type, int id, struct super_block *sb)
696 {
697 	return call_int_hook(quotactl, 0, cmds, type, id, sb);
698 }
699 
700 int security_quota_on(struct dentry *dentry)
701 {
702 	return call_int_hook(quota_on, 0, dentry);
703 }
704 
705 int security_syslog(int type)
706 {
707 	return call_int_hook(syslog, 0, type);
708 }
709 
710 int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
711 {
712 	return call_int_hook(settime, 0, ts, tz);
713 }
714 
715 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
716 {
717 	struct security_hook_list *hp;
718 	int cap_sys_admin = 1;
719 	int rc;
720 
721 	/*
722 	 * Each module responds with a positive value if it thinks
723 	 * the __vm_enough_memory() call should be made with
724 	 * cap_sys_admin set. cap_sys_admin is passed as set only
725 	 * when every module agrees; if any module returns a value
726 	 * of zero or less, it is cleared.
727 	 */
728 	hlist_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
729 		rc = hp->hook.vm_enough_memory(mm, pages);
730 		if (rc <= 0) {
731 			cap_sys_admin = 0;
732 			break;
733 		}
734 	}
735 	return __vm_enough_memory(mm, pages, cap_sys_admin);
736 }
737 
738 int security_bprm_set_creds(struct linux_binprm *bprm)
739 {
740 	return call_int_hook(bprm_set_creds, 0, bprm);
741 }
742 
743 int security_bprm_check(struct linux_binprm *bprm)
744 {
745 	int ret;
746 
747 	ret = call_int_hook(bprm_check_security, 0, bprm);
748 	if (ret)
749 		return ret;
750 	return ima_bprm_check(bprm);
751 }
752 
753 void security_bprm_committing_creds(struct linux_binprm *bprm)
754 {
755 	call_void_hook(bprm_committing_creds, bprm);
756 }
757 
758 void security_bprm_committed_creds(struct linux_binprm *bprm)
759 {
760 	call_void_hook(bprm_committed_creds, bprm);
761 }
762 
763 int security_sb_alloc(struct super_block *sb)
764 {
765 	return call_int_hook(sb_alloc_security, 0, sb);
766 }
767 
768 void security_sb_free(struct super_block *sb)
769 {
770 	call_void_hook(sb_free_security, sb);
771 }
772 
773 void security_free_mnt_opts(void **mnt_opts)
774 {
775 	if (!*mnt_opts)
776 		return;
777 	call_void_hook(sb_free_mnt_opts, *mnt_opts);
778 	*mnt_opts = NULL;
779 }
780 EXPORT_SYMBOL(security_free_mnt_opts);
781 
782 int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
783 {
784 	return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts);
785 }
786 EXPORT_SYMBOL(security_sb_eat_lsm_opts);
787 
788 int security_sb_remount(struct super_block *sb,
789 			void *mnt_opts)
790 {
791 	return call_int_hook(sb_remount, 0, sb, mnt_opts);
792 }
793 EXPORT_SYMBOL(security_sb_remount);
794 
795 int security_sb_kern_mount(struct super_block *sb)
796 {
797 	return call_int_hook(sb_kern_mount, 0, sb);
798 }
799 
800 int security_sb_show_options(struct seq_file *m, struct super_block *sb)
801 {
802 	return call_int_hook(sb_show_options, 0, m, sb);
803 }
804 
805 int security_sb_statfs(struct dentry *dentry)
806 {
807 	return call_int_hook(sb_statfs, 0, dentry);
808 }
809 
810 int security_sb_mount(const char *dev_name, const struct path *path,
811                        const char *type, unsigned long flags, void *data)
812 {
813 	return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
814 }
815 
816 int security_sb_umount(struct vfsmount *mnt, int flags)
817 {
818 	return call_int_hook(sb_umount, 0, mnt, flags);
819 }
820 
821 int security_sb_pivotroot(const struct path *old_path, const struct path *new_path)
822 {
823 	return call_int_hook(sb_pivotroot, 0, old_path, new_path);
824 }
825 
826 int security_sb_set_mnt_opts(struct super_block *sb,
827 				void *mnt_opts,
828 				unsigned long kern_flags,
829 				unsigned long *set_kern_flags)
830 {
831 	return call_int_hook(sb_set_mnt_opts,
832 				mnt_opts ? -EOPNOTSUPP : 0, sb,
833 				mnt_opts, kern_flags, set_kern_flags);
834 }
835 EXPORT_SYMBOL(security_sb_set_mnt_opts);
836 
837 int security_sb_clone_mnt_opts(const struct super_block *oldsb,
838 				struct super_block *newsb,
839 				unsigned long kern_flags,
840 				unsigned long *set_kern_flags)
841 {
842 	return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb,
843 				kern_flags, set_kern_flags);
844 }
845 EXPORT_SYMBOL(security_sb_clone_mnt_opts);
846 
847 int security_add_mnt_opt(const char *option, const char *val, int len,
848 			 void **mnt_opts)
849 {
850 	return call_int_hook(sb_add_mnt_opt, -EINVAL,
851 					option, val, len, mnt_opts);
852 }
853 EXPORT_SYMBOL(security_add_mnt_opt);
854 
855 int security_inode_alloc(struct inode *inode)
856 {
857 	int rc = lsm_inode_alloc(inode);
858 
859 	if (unlikely(rc))
860 		return rc;
861 	rc = call_int_hook(inode_alloc_security, 0, inode);
862 	if (unlikely(rc))
863 		security_inode_free(inode);
864 	return rc;
865 }
866 
867 static void inode_free_by_rcu(struct rcu_head *head)
868 {
869 	/*
870 	 * The rcu head is at the start of the inode blob
871 	 */
872 	kmem_cache_free(lsm_inode_cache, head);
873 }
874 
875 void security_inode_free(struct inode *inode)
876 {
877 	integrity_inode_free(inode);
878 	call_void_hook(inode_free_security, inode);
879 	/*
880 	 * The inode may still be referenced in a path walk and
881 	 * a call to security_inode_permission() can be made
882 	 * after inode_free_security() is called. Ideally, the VFS
883 	 * wouldn't do this, but fixing that is a much harder
884 	 * job. For now, simply free the i_security via RCU, and
885 	 * leave the current inode->i_security pointer intact.
886 	 * The inode will be freed after the RCU grace period too.
887 	 */
888 	if (inode->i_security)
889 		call_rcu((struct rcu_head *)inode->i_security,
890 				inode_free_by_rcu);
891 }
892 
893 int security_dentry_init_security(struct dentry *dentry, int mode,
894 					const struct qstr *name, void **ctx,
895 					u32 *ctxlen)
896 {
897 	return call_int_hook(dentry_init_security, -EOPNOTSUPP, dentry, mode,
898 				name, ctx, ctxlen);
899 }
900 EXPORT_SYMBOL(security_dentry_init_security);
901 
902 int security_dentry_create_files_as(struct dentry *dentry, int mode,
903 				    struct qstr *name,
904 				    const struct cred *old, struct cred *new)
905 {
906 	return call_int_hook(dentry_create_files_as, 0, dentry, mode,
907 				name, old, new);
908 }
909 EXPORT_SYMBOL(security_dentry_create_files_as);
910 
911 int security_inode_init_security(struct inode *inode, struct inode *dir,
912 				 const struct qstr *qstr,
913 				 const initxattrs initxattrs, void *fs_data)
914 {
915 	struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
916 	struct xattr *lsm_xattr, *evm_xattr, *xattr;
917 	int ret;
918 
919 	if (unlikely(IS_PRIVATE(inode)))
920 		return 0;
921 
922 	if (!initxattrs)
923 		return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
924 				     dir, qstr, NULL, NULL, NULL);
925 	memset(new_xattrs, 0, sizeof(new_xattrs));
926 	lsm_xattr = new_xattrs;
927 	ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
928 						&lsm_xattr->name,
929 						&lsm_xattr->value,
930 						&lsm_xattr->value_len);
931 	if (ret)
932 		goto out;
933 
934 	evm_xattr = lsm_xattr + 1;
935 	ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
936 	if (ret)
937 		goto out;
938 	ret = initxattrs(inode, new_xattrs, fs_data);
939 out:
940 	for (xattr = new_xattrs; xattr->value != NULL; xattr++)
941 		kfree(xattr->value);
942 	return (ret == -EOPNOTSUPP) ? 0 : ret;
943 }
944 EXPORT_SYMBOL(security_inode_init_security);
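/*
 * A sketch of the filesystem side of this interface ("examplefs" and
 * its helpers are placeholders): the initxattrs callback receives the
 * xattr array assembled above, terminated by an entry with a NULL
 * name, and writes each entry to the new inode.
 *
 *	static int examplefs_initxattrs(struct inode *inode,
 *					const struct xattr *xattr_array,
 *					void *fs_data)
 *	{
 *		const struct xattr *xattr;
 *
 *		for (xattr = xattr_array; xattr->name != NULL; xattr++)
 *			examplefs_write_xattr(inode, xattr->name,
 *					      xattr->value, xattr->value_len);
 *		return 0;
 *	}
 *
 *	err = security_inode_init_security(inode, dir, &dentry->d_name,
 *					   examplefs_initxattrs, fs_data);
 */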
945 
946 int security_old_inode_init_security(struct inode *inode, struct inode *dir,
947 				     const struct qstr *qstr, const char **name,
948 				     void **value, size_t *len)
949 {
950 	if (unlikely(IS_PRIVATE(inode)))
951 		return -EOPNOTSUPP;
952 	return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
953 			     qstr, name, value, len);
954 }
955 EXPORT_SYMBOL(security_old_inode_init_security);
956 
957 #ifdef CONFIG_SECURITY_PATH
958 int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
959 			unsigned int dev)
960 {
961 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
962 		return 0;
963 	return call_int_hook(path_mknod, 0, dir, dentry, mode, dev);
964 }
965 EXPORT_SYMBOL(security_path_mknod);
966 
967 int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode)
968 {
969 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
970 		return 0;
971 	return call_int_hook(path_mkdir, 0, dir, dentry, mode);
972 }
973 EXPORT_SYMBOL(security_path_mkdir);
974 
975 int security_path_rmdir(const struct path *dir, struct dentry *dentry)
976 {
977 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
978 		return 0;
979 	return call_int_hook(path_rmdir, 0, dir, dentry);
980 }
981 
982 int security_path_unlink(const struct path *dir, struct dentry *dentry)
983 {
984 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
985 		return 0;
986 	return call_int_hook(path_unlink, 0, dir, dentry);
987 }
988 EXPORT_SYMBOL(security_path_unlink);
989 
990 int security_path_symlink(const struct path *dir, struct dentry *dentry,
991 			  const char *old_name)
992 {
993 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
994 		return 0;
995 	return call_int_hook(path_symlink, 0, dir, dentry, old_name);
996 }
997 
998 int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
999 		       struct dentry *new_dentry)
1000 {
1001 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1002 		return 0;
1003 	return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
1004 }
1005 
1006 int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
1007 			 const struct path *new_dir, struct dentry *new_dentry,
1008 			 unsigned int flags)
1009 {
1010 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
1011 		     (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1012 		return 0;
1013 
1014 	if (flags & RENAME_EXCHANGE) {
1015 		int err = call_int_hook(path_rename, 0, new_dir, new_dentry,
1016 					old_dir, old_dentry);
1017 		if (err)
1018 			return err;
1019 	}
1020 
1021 	return call_int_hook(path_rename, 0, old_dir, old_dentry, new_dir,
1022 				new_dentry);
1023 }
1024 EXPORT_SYMBOL(security_path_rename);
1025 
1026 int security_path_truncate(const struct path *path)
1027 {
1028 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1029 		return 0;
1030 	return call_int_hook(path_truncate, 0, path);
1031 }
1032 
1033 int security_path_chmod(const struct path *path, umode_t mode)
1034 {
1035 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1036 		return 0;
1037 	return call_int_hook(path_chmod, 0, path, mode);
1038 }
1039 
1040 int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
1041 {
1042 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1043 		return 0;
1044 	return call_int_hook(path_chown, 0, path, uid, gid);
1045 }
1046 
1047 int security_path_chroot(const struct path *path)
1048 {
1049 	return call_int_hook(path_chroot, 0, path);
1050 }
1051 #endif
1052 
1053 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
1054 {
1055 	if (unlikely(IS_PRIVATE(dir)))
1056 		return 0;
1057 	return call_int_hook(inode_create, 0, dir, dentry, mode);
1058 }
1059 EXPORT_SYMBOL_GPL(security_inode_create);
1060 
1061 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
1062 			 struct dentry *new_dentry)
1063 {
1064 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1065 		return 0;
1066 	return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
1067 }
1068 
1069 int security_inode_unlink(struct inode *dir, struct dentry *dentry)
1070 {
1071 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1072 		return 0;
1073 	return call_int_hook(inode_unlink, 0, dir, dentry);
1074 }
1075 
1076 int security_inode_symlink(struct inode *dir, struct dentry *dentry,
1077 			    const char *old_name)
1078 {
1079 	if (unlikely(IS_PRIVATE(dir)))
1080 		return 0;
1081 	return call_int_hook(inode_symlink, 0, dir, dentry, old_name);
1082 }
1083 
1084 int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1085 {
1086 	if (unlikely(IS_PRIVATE(dir)))
1087 		return 0;
1088 	return call_int_hook(inode_mkdir, 0, dir, dentry, mode);
1089 }
1090 EXPORT_SYMBOL_GPL(security_inode_mkdir);
1091 
1092 int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
1093 {
1094 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1095 		return 0;
1096 	return call_int_hook(inode_rmdir, 0, dir, dentry);
1097 }
1098 
1099 int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1100 {
1101 	if (unlikely(IS_PRIVATE(dir)))
1102 		return 0;
1103 	return call_int_hook(inode_mknod, 0, dir, dentry, mode, dev);
1104 }
1105 
1106 int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
1107 			   struct inode *new_dir, struct dentry *new_dentry,
1108 			   unsigned int flags)
1109 {
1110 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
1111 		     (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1112 		return 0;
1113 
1114 	if (flags & RENAME_EXCHANGE) {
1115 		int err = call_int_hook(inode_rename, 0, new_dir, new_dentry,
1116 						     old_dir, old_dentry);
1117 		if (err)
1118 			return err;
1119 	}
1120 
1121 	return call_int_hook(inode_rename, 0, old_dir, old_dentry,
1122 					   new_dir, new_dentry);
1123 }
1124 
1125 int security_inode_readlink(struct dentry *dentry)
1126 {
1127 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1128 		return 0;
1129 	return call_int_hook(inode_readlink, 0, dentry);
1130 }
1131 
1132 int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
1133 			       bool rcu)
1134 {
1135 	if (unlikely(IS_PRIVATE(inode)))
1136 		return 0;
1137 	return call_int_hook(inode_follow_link, 0, dentry, inode, rcu);
1138 }
1139 
1140 int security_inode_permission(struct inode *inode, int mask)
1141 {
1142 	if (unlikely(IS_PRIVATE(inode)))
1143 		return 0;
1144 	return call_int_hook(inode_permission, 0, inode, mask);
1145 }
1146 
1147 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
1148 {
1149 	int ret;
1150 
1151 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1152 		return 0;
1153 	ret = call_int_hook(inode_setattr, 0, dentry, attr);
1154 	if (ret)
1155 		return ret;
1156 	return evm_inode_setattr(dentry, attr);
1157 }
1158 EXPORT_SYMBOL_GPL(security_inode_setattr);
1159 
1160 int security_inode_getattr(const struct path *path)
1161 {
1162 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1163 		return 0;
1164 	return call_int_hook(inode_getattr, 0, path);
1165 }
1166 
1167 int security_inode_setxattr(struct dentry *dentry, const char *name,
1168 			    const void *value, size_t size, int flags)
1169 {
1170 	int ret;
1171 
1172 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1173 		return 0;
1174 	/*
1175 	 * SELinux and Smack integrate the cap call,
1176 	 * so assume that all LSMs supplying this call do so.
1177 	 */
1178 	ret = call_int_hook(inode_setxattr, 1, dentry, name, value, size,
1179 				flags);
1180 
1181 	if (ret == 1)
1182 		ret = cap_inode_setxattr(dentry, name, value, size, flags);
1183 	if (ret)
1184 		return ret;
1185 	ret = ima_inode_setxattr(dentry, name, value, size);
1186 	if (ret)
1187 		return ret;
1188 	return evm_inode_setxattr(dentry, name, value, size);
1189 }
1190 
1191 void security_inode_post_setxattr(struct dentry *dentry, const char *name,
1192 				  const void *value, size_t size, int flags)
1193 {
1194 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1195 		return;
1196 	call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
1197 	evm_inode_post_setxattr(dentry, name, value, size);
1198 }
1199 
1200 int security_inode_getxattr(struct dentry *dentry, const char *name)
1201 {
1202 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1203 		return 0;
1204 	return call_int_hook(inode_getxattr, 0, dentry, name);
1205 }
1206 
1207 int security_inode_listxattr(struct dentry *dentry)
1208 {
1209 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1210 		return 0;
1211 	return call_int_hook(inode_listxattr, 0, dentry);
1212 }
1213 
1214 int security_inode_removexattr(struct dentry *dentry, const char *name)
1215 {
1216 	int ret;
1217 
1218 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1219 		return 0;
1220 	/*
1221 	 * SELinux and Smack integrate the cap call,
1222 	 * so assume that all LSMs supplying this call do so.
1223 	 */
1224 	ret = call_int_hook(inode_removexattr, 1, dentry, name);
1225 	if (ret == 1)
1226 		ret = cap_inode_removexattr(dentry, name);
1227 	if (ret)
1228 		return ret;
1229 	ret = ima_inode_removexattr(dentry, name);
1230 	if (ret)
1231 		return ret;
1232 	return evm_inode_removexattr(dentry, name);
1233 }
1234 
1235 int security_inode_need_killpriv(struct dentry *dentry)
1236 {
1237 	return call_int_hook(inode_need_killpriv, 0, dentry);
1238 }
1239 
1240 int security_inode_killpriv(struct dentry *dentry)
1241 {
1242 	return call_int_hook(inode_killpriv, 0, dentry);
1243 }
1244 
1245 int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
1246 {
1247 	struct security_hook_list *hp;
1248 	int rc;
1249 
1250 	if (unlikely(IS_PRIVATE(inode)))
1251 		return -EOPNOTSUPP;
1252 	/*
1253 	 * Only one module will provide an attribute with a given name.
1254 	 */
1255 	hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) {
1256 		rc = hp->hook.inode_getsecurity(inode, name, buffer, alloc);
1257 		if (rc != -EOPNOTSUPP)
1258 			return rc;
1259 	}
1260 	return -EOPNOTSUPP;
1261 }
1262 
1263 int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
1264 {
1265 	struct security_hook_list *hp;
1266 	int rc;
1267 
1268 	if (unlikely(IS_PRIVATE(inode)))
1269 		return -EOPNOTSUPP;
1270 	/*
1271 	 * Only one module will provide an attribute with a given name.
1272 	 */
1273 	hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) {
1274 		rc = hp->hook.inode_setsecurity(inode, name, value, size,
1275 								flags);
1276 		if (rc != -EOPNOTSUPP)
1277 			return rc;
1278 	}
1279 	return -EOPNOTSUPP;
1280 }
1281 
1282 int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
1283 {
1284 	if (unlikely(IS_PRIVATE(inode)))
1285 		return 0;
1286 	return call_int_hook(inode_listsecurity, 0, inode, buffer, buffer_size);
1287 }
1288 EXPORT_SYMBOL(security_inode_listsecurity);
1289 
1290 void security_inode_getsecid(struct inode *inode, u32 *secid)
1291 {
1292 	call_void_hook(inode_getsecid, inode, secid);
1293 }
1294 
1295 int security_inode_copy_up(struct dentry *src, struct cred **new)
1296 {
1297 	return call_int_hook(inode_copy_up, 0, src, new);
1298 }
1299 EXPORT_SYMBOL(security_inode_copy_up);
1300 
1301 int security_inode_copy_up_xattr(const char *name)
1302 {
1303 	return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
1304 }
1305 EXPORT_SYMBOL(security_inode_copy_up_xattr);
1306 
1307 int security_file_permission(struct file *file, int mask)
1308 {
1309 	int ret;
1310 
1311 	ret = call_int_hook(file_permission, 0, file, mask);
1312 	if (ret)
1313 		return ret;
1314 
1315 	return fsnotify_perm(file, mask);
1316 }
1317 
1318 int security_file_alloc(struct file *file)
1319 {
1320 	int rc = lsm_file_alloc(file);
1321 
1322 	if (rc)
1323 		return rc;
1324 	rc = call_int_hook(file_alloc_security, 0, file);
1325 	if (unlikely(rc))
1326 		security_file_free(file);
1327 	return rc;
1328 }
1329 
1330 void security_file_free(struct file *file)
1331 {
1332 	void *blob;
1333 
1334 	call_void_hook(file_free_security, file);
1335 
1336 	blob = file->f_security;
1337 	if (blob) {
1338 		file->f_security = NULL;
1339 		kmem_cache_free(lsm_file_cache, blob);
1340 	}
1341 }
1342 
1343 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1344 {
1345 	return call_int_hook(file_ioctl, 0, file, cmd, arg);
1346 }
1347 
1348 static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
1349 {
1350 	/*
1351 	 * Do we have PROT_READ and does the application expect
1352 	 * it to imply PROT_EXEC?  If not, nothing to talk about...
1353 	 */
1354 	if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
1355 		return prot;
1356 	if (!(current->personality & READ_IMPLIES_EXEC))
1357 		return prot;
1358 	/*
1359 	 * If that's an anonymous mapping, let it.
1360 	 */
1361 	if (!file)
1362 		return prot | PROT_EXEC;
1363 	/*
1364 	 * Ditto if it's not on a noexec mount, except that on !MMU we need
1365 	 * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case.
1366 	 */
1367 	if (!path_noexec(&file->f_path)) {
1368 #ifndef CONFIG_MMU
1369 		if (file->f_op->mmap_capabilities) {
1370 			unsigned caps = file->f_op->mmap_capabilities(file);
1371 			if (!(caps & NOMMU_MAP_EXEC))
1372 				return prot;
1373 		}
1374 #endif
1375 		return prot | PROT_EXEC;
1376 	}
1377 	/* anything on noexec mount won't get PROT_EXEC */
1378 	return prot;
1379 }
1380 
1381 int security_mmap_file(struct file *file, unsigned long prot,
1382 			unsigned long flags)
1383 {
1384 	int ret;
1385 	ret = call_int_hook(mmap_file, 0, file, prot,
1386 					mmap_prot(file, prot), flags);
1387 	if (ret)
1388 		return ret;
1389 	return ima_file_mmap(file, prot);
1390 }
1391 
1392 int security_mmap_addr(unsigned long addr)
1393 {
1394 	return call_int_hook(mmap_addr, 0, addr);
1395 }
1396 
1397 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
1398 			    unsigned long prot)
1399 {
1400 	return call_int_hook(file_mprotect, 0, vma, reqprot, prot);
1401 }
1402 
1403 int security_file_lock(struct file *file, unsigned int cmd)
1404 {
1405 	return call_int_hook(file_lock, 0, file, cmd);
1406 }
1407 
1408 int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1409 {
1410 	return call_int_hook(file_fcntl, 0, file, cmd, arg);
1411 }
1412 
1413 void security_file_set_fowner(struct file *file)
1414 {
1415 	call_void_hook(file_set_fowner, file);
1416 }
1417 
1418 int security_file_send_sigiotask(struct task_struct *tsk,
1419 				  struct fown_struct *fown, int sig)
1420 {
1421 	return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
1422 }
1423 
1424 int security_file_receive(struct file *file)
1425 {
1426 	return call_int_hook(file_receive, 0, file);
1427 }
1428 
1429 int security_file_open(struct file *file)
1430 {
1431 	int ret;
1432 
1433 	ret = call_int_hook(file_open, 0, file);
1434 	if (ret)
1435 		return ret;
1436 
1437 	return fsnotify_perm(file, MAY_OPEN);
1438 }
1439 
1440 int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
1441 {
1442 	int rc = lsm_task_alloc(task);
1443 
1444 	if (rc)
1445 		return rc;
1446 	rc = call_int_hook(task_alloc, 0, task, clone_flags);
1447 	if (unlikely(rc))
1448 		security_task_free(task);
1449 	return rc;
1450 }
1451 
1452 void security_task_free(struct task_struct *task)
1453 {
1454 	call_void_hook(task_free, task);
1455 
1456 	kfree(task->security);
1457 	task->security = NULL;
1458 }
1459 
1460 int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
1461 {
1462 	int rc = lsm_cred_alloc(cred, gfp);
1463 
1464 	if (rc)
1465 		return rc;
1466 
1467 	rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
1468 	if (unlikely(rc))
1469 		security_cred_free(cred);
1470 	return rc;
1471 }
1472 
1473 void security_cred_free(struct cred *cred)
1474 {
1475 	/*
1476 	 * There is a failure case in prepare_creds() that
1477 	 * may result in a call here with ->security being NULL.
1478 	 */
1479 	if (unlikely(cred->security == NULL))
1480 		return;
1481 
1482 	call_void_hook(cred_free, cred);
1483 
1484 	kfree(cred->security);
1485 	cred->security = NULL;
1486 }
1487 
1488 int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
1489 {
1490 	int rc = lsm_cred_alloc(new, gfp);
1491 
1492 	if (rc)
1493 		return rc;
1494 
1495 	rc = call_int_hook(cred_prepare, 0, new, old, gfp);
1496 	if (unlikely(rc))
1497 		security_cred_free(new);
1498 	return rc;
1499 }
1500 
1501 void security_transfer_creds(struct cred *new, const struct cred *old)
1502 {
1503 	call_void_hook(cred_transfer, new, old);
1504 }
1505 
1506 void security_cred_getsecid(const struct cred *c, u32 *secid)
1507 {
1508 	*secid = 0;
1509 	call_void_hook(cred_getsecid, c, secid);
1510 }
1511 EXPORT_SYMBOL(security_cred_getsecid);
1512 
1513 int security_kernel_act_as(struct cred *new, u32 secid)
1514 {
1515 	return call_int_hook(kernel_act_as, 0, new, secid);
1516 }
1517 
1518 int security_kernel_create_files_as(struct cred *new, struct inode *inode)
1519 {
1520 	return call_int_hook(kernel_create_files_as, 0, new, inode);
1521 }
1522 
1523 int security_kernel_module_request(char *kmod_name)
1524 {
1525 	int ret;
1526 
1527 	ret = call_int_hook(kernel_module_request, 0, kmod_name);
1528 	if (ret)
1529 		return ret;
1530 	return integrity_kernel_module_request(kmod_name);
1531 }
1532 
1533 int security_kernel_read_file(struct file *file, enum kernel_read_file_id id)
1534 {
1535 	int ret;
1536 
1537 	ret = call_int_hook(kernel_read_file, 0, file, id);
1538 	if (ret)
1539 		return ret;
1540 	return ima_read_file(file, id);
1541 }
1542 EXPORT_SYMBOL_GPL(security_kernel_read_file);
1543 
1544 int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
1545 				   enum kernel_read_file_id id)
1546 {
1547 	int ret;
1548 
1549 	ret = call_int_hook(kernel_post_read_file, 0, file, buf, size, id);
1550 	if (ret)
1551 		return ret;
1552 	return ima_post_read_file(file, buf, size, id);
1553 }
1554 EXPORT_SYMBOL_GPL(security_kernel_post_read_file);
1555 
1556 int security_kernel_load_data(enum kernel_load_data_id id)
1557 {
1558 	int ret;
1559 
1560 	ret = call_int_hook(kernel_load_data, 0, id);
1561 	if (ret)
1562 		return ret;
1563 	return ima_load_data(id);
1564 }
1565 EXPORT_SYMBOL_GPL(security_kernel_load_data);
1566 
1567 int security_task_fix_setuid(struct cred *new, const struct cred *old,
1568 			     int flags)
1569 {
1570 	return call_int_hook(task_fix_setuid, 0, new, old, flags);
1571 }
1572 
1573 int security_task_setpgid(struct task_struct *p, pid_t pgid)
1574 {
1575 	return call_int_hook(task_setpgid, 0, p, pgid);
1576 }
1577 
1578 int security_task_getpgid(struct task_struct *p)
1579 {
1580 	return call_int_hook(task_getpgid, 0, p);
1581 }
1582 
1583 int security_task_getsid(struct task_struct *p)
1584 {
1585 	return call_int_hook(task_getsid, 0, p);
1586 }
1587 
1588 void security_task_getsecid(struct task_struct *p, u32 *secid)
1589 {
1590 	*secid = 0;
1591 	call_void_hook(task_getsecid, p, secid);
1592 }
1593 EXPORT_SYMBOL(security_task_getsecid);
1594 
1595 int security_task_setnice(struct task_struct *p, int nice)
1596 {
1597 	return call_int_hook(task_setnice, 0, p, nice);
1598 }
1599 
1600 int security_task_setioprio(struct task_struct *p, int ioprio)
1601 {
1602 	return call_int_hook(task_setioprio, 0, p, ioprio);
1603 }
1604 
1605 int security_task_getioprio(struct task_struct *p)
1606 {
1607 	return call_int_hook(task_getioprio, 0, p);
1608 }
1609 
1610 int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
1611 			  unsigned int flags)
1612 {
1613 	return call_int_hook(task_prlimit, 0, cred, tcred, flags);
1614 }
1615 
1616 int security_task_setrlimit(struct task_struct *p, unsigned int resource,
1617 		struct rlimit *new_rlim)
1618 {
1619 	return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
1620 }
1621 
1622 int security_task_setscheduler(struct task_struct *p)
1623 {
1624 	return call_int_hook(task_setscheduler, 0, p);
1625 }
1626 
1627 int security_task_getscheduler(struct task_struct *p)
1628 {
1629 	return call_int_hook(task_getscheduler, 0, p);
1630 }
1631 
1632 int security_task_movememory(struct task_struct *p)
1633 {
1634 	return call_int_hook(task_movememory, 0, p);
1635 }
1636 
1637 int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
1638 			int sig, const struct cred *cred)
1639 {
1640 	return call_int_hook(task_kill, 0, p, info, sig, cred);
1641 }
1642 
1643 int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
1644 			 unsigned long arg4, unsigned long arg5)
1645 {
1646 	int thisrc;
1647 	int rc = -ENOSYS;
1648 	struct security_hook_list *hp;
1649 
1650 	hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
1651 		thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
1652 		if (thisrc != -ENOSYS) {
1653 			rc = thisrc;
1654 			if (thisrc != 0)
1655 				break;
1656 		}
1657 	}
1658 	return rc;
1659 }
1660 
1661 void security_task_to_inode(struct task_struct *p, struct inode *inode)
1662 {
1663 	call_void_hook(task_to_inode, p, inode);
1664 }
1665 
1666 int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
1667 {
1668 	return call_int_hook(ipc_permission, 0, ipcp, flag);
1669 }
1670 
1671 void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
1672 {
1673 	*secid = 0;
1674 	call_void_hook(ipc_getsecid, ipcp, secid);
1675 }
1676 
1677 int security_msg_msg_alloc(struct msg_msg *msg)
1678 {
1679 	int rc = lsm_msg_msg_alloc(msg);
1680 
1681 	if (unlikely(rc))
1682 		return rc;
1683 	rc = call_int_hook(msg_msg_alloc_security, 0, msg);
1684 	if (unlikely(rc))
1685 		security_msg_msg_free(msg);
1686 	return rc;
1687 }
1688 
1689 void security_msg_msg_free(struct msg_msg *msg)
1690 {
1691 	call_void_hook(msg_msg_free_security, msg);
1692 	kfree(msg->security);
1693 	msg->security = NULL;
1694 }
1695 
1696 int security_msg_queue_alloc(struct kern_ipc_perm *msq)
1697 {
1698 	int rc = lsm_ipc_alloc(msq);
1699 
1700 	if (unlikely(rc))
1701 		return rc;
1702 	rc = call_int_hook(msg_queue_alloc_security, 0, msq);
1703 	if (unlikely(rc))
1704 		security_msg_queue_free(msq);
1705 	return rc;
1706 }
1707 
1708 void security_msg_queue_free(struct kern_ipc_perm *msq)
1709 {
1710 	call_void_hook(msg_queue_free_security, msq);
1711 	kfree(msq->security);
1712 	msq->security = NULL;
1713 }
1714 
1715 int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
1716 {
1717 	return call_int_hook(msg_queue_associate, 0, msq, msqflg);
1718 }
1719 
1720 int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
1721 {
1722 	return call_int_hook(msg_queue_msgctl, 0, msq, cmd);
1723 }
1724 
1725 int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
1726 			       struct msg_msg *msg, int msqflg)
1727 {
1728 	return call_int_hook(msg_queue_msgsnd, 0, msq, msg, msqflg);
1729 }
1730 
1731 int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
1732 			       struct task_struct *target, long type, int mode)
1733 {
1734 	return call_int_hook(msg_queue_msgrcv, 0, msq, msg, target, type, mode);
1735 }
1736 
1737 int security_shm_alloc(struct kern_ipc_perm *shp)
1738 {
1739 	int rc = lsm_ipc_alloc(shp);
1740 
1741 	if (unlikely(rc))
1742 		return rc;
1743 	rc = call_int_hook(shm_alloc_security, 0, shp);
1744 	if (unlikely(rc))
1745 		security_shm_free(shp);
1746 	return rc;
1747 }
1748 
1749 void security_shm_free(struct kern_ipc_perm *shp)
1750 {
1751 	call_void_hook(shm_free_security, shp);
1752 	kfree(shp->security);
1753 	shp->security = NULL;
1754 }
1755 
1756 int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
1757 {
1758 	return call_int_hook(shm_associate, 0, shp, shmflg);
1759 }
1760 
1761 int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
1762 {
1763 	return call_int_hook(shm_shmctl, 0, shp, cmd);
1764 }
1765 
1766 int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg)
1767 {
1768 	return call_int_hook(shm_shmat, 0, shp, shmaddr, shmflg);
1769 }
1770 
1771 int security_sem_alloc(struct kern_ipc_perm *sma)
1772 {
1773 	int rc = lsm_ipc_alloc(sma);
1774 
1775 	if (unlikely(rc))
1776 		return rc;
1777 	rc = call_int_hook(sem_alloc_security, 0, sma);
1778 	if (unlikely(rc))
1779 		security_sem_free(sma);
1780 	return rc;
1781 }
1782 
1783 void security_sem_free(struct kern_ipc_perm *sma)
1784 {
1785 	call_void_hook(sem_free_security, sma);
1786 	kfree(sma->security);
1787 	sma->security = NULL;
1788 }
1789 
1790 int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
1791 {
1792 	return call_int_hook(sem_associate, 0, sma, semflg);
1793 }
1794 
1795 int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
1796 {
1797 	return call_int_hook(sem_semctl, 0, sma, cmd);
1798 }
1799 
1800 int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
1801 			unsigned nsops, int alter)
1802 {
1803 	return call_int_hook(sem_semop, 0, sma, sops, nsops, alter);
1804 }
1805 
1806 void security_d_instantiate(struct dentry *dentry, struct inode *inode)
1807 {
1808 	if (unlikely(inode && IS_PRIVATE(inode)))
1809 		return;
1810 	call_void_hook(d_instantiate, dentry, inode);
1811 }
1812 EXPORT_SYMBOL(security_d_instantiate);
1813 
1814 int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
1815 				char **value)
1816 {
1817 	struct security_hook_list *hp;
1818 
1819 	hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
1820 		if (lsm != NULL && strcmp(lsm, hp->lsm))
1821 			continue;
1822 		return hp->hook.getprocattr(p, name, value);
1823 	}
1824 	return -EINVAL;
1825 }
1826 
1827 int security_setprocattr(const char *lsm, const char *name, void *value,
1828 			 size_t size)
1829 {
1830 	struct security_hook_list *hp;
1831 
1832 	hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
1833 		if (lsm != NULL && strcmp(lsm, hp->lsm))
1834 			continue;
1835 		return hp->hook.setprocattr(name, value, size);
1836 	}
1837 	return -EINVAL;
1838 }
1839 
1840 int security_netlink_send(struct sock *sk, struct sk_buff *skb)
1841 {
1842 	return call_int_hook(netlink_send, 0, sk, skb);
1843 }
1844 
1845 int security_ismaclabel(const char *name)
1846 {
1847 	return call_int_hook(ismaclabel, 0, name);
1848 }
1849 EXPORT_SYMBOL(security_ismaclabel);
1850 
1851 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
1852 {
1853 	return call_int_hook(secid_to_secctx, -EOPNOTSUPP, secid, secdata,
1854 				seclen);
1855 }
1856 EXPORT_SYMBOL(security_secid_to_secctx);
1857 
1858 int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
1859 {
1860 	*secid = 0;
1861 	return call_int_hook(secctx_to_secid, 0, secdata, seclen, secid);
1862 }
1863 EXPORT_SYMBOL(security_secctx_to_secid);
1864 
1865 void security_release_secctx(char *secdata, u32 seclen)
1866 {
1867 	call_void_hook(release_secctx, secdata, seclen);
1868 }
1869 EXPORT_SYMBOL(security_release_secctx);
1870 
1871 void security_inode_invalidate_secctx(struct inode *inode)
1872 {
1873 	call_void_hook(inode_invalidate_secctx, inode);
1874 }
1875 EXPORT_SYMBOL(security_inode_invalidate_secctx);
1876 
1877 int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
1878 {
1879 	return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
1880 }
1881 EXPORT_SYMBOL(security_inode_notifysecctx);
1882 
1883 int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
1884 {
1885 	return call_int_hook(inode_setsecctx, 0, dentry, ctx, ctxlen);
1886 }
1887 EXPORT_SYMBOL(security_inode_setsecctx);
1888 
1889 int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
1890 {
1891 	return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
1892 }
1893 EXPORT_SYMBOL(security_inode_getsecctx);
1894 
1895 #ifdef CONFIG_SECURITY_NETWORK
1896 
1897 int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
1898 {
1899 	return call_int_hook(unix_stream_connect, 0, sock, other, newsk);
1900 }
1901 EXPORT_SYMBOL(security_unix_stream_connect);
1902 
1903 int security_unix_may_send(struct socket *sock,  struct socket *other)
1904 {
1905 	return call_int_hook(unix_may_send, 0, sock, other);
1906 }
1907 EXPORT_SYMBOL(security_unix_may_send);
1908 
1909 int security_socket_create(int family, int type, int protocol, int kern)
1910 {
1911 	return call_int_hook(socket_create, 0, family, type, protocol, kern);
1912 }
1913 
1914 int security_socket_post_create(struct socket *sock, int family,
1915 				int type, int protocol, int kern)
1916 {
1917 	return call_int_hook(socket_post_create, 0, sock, family, type,
1918 						protocol, kern);
1919 }
1920 
1921 int security_socket_socketpair(struct socket *socka, struct socket *sockb)
1922 {
1923 	return call_int_hook(socket_socketpair, 0, socka, sockb);
1924 }
1925 EXPORT_SYMBOL(security_socket_socketpair);
1926 
1927 int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
1928 {
1929 	return call_int_hook(socket_bind, 0, sock, address, addrlen);
1930 }
1931 
1932 int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
1933 {
1934 	return call_int_hook(socket_connect, 0, sock, address, addrlen);
1935 }
1936 
1937 int security_socket_listen(struct socket *sock, int backlog)
1938 {
1939 	return call_int_hook(socket_listen, 0, sock, backlog);
1940 }
1941 
1942 int security_socket_accept(struct socket *sock, struct socket *newsock)
1943 {
1944 	return call_int_hook(socket_accept, 0, sock, newsock);
1945 }
1946 
1947 int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
1948 {
1949 	return call_int_hook(socket_sendmsg, 0, sock, msg, size);
1950 }
1951 
1952 int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
1953 			    int size, int flags)
1954 {
1955 	return call_int_hook(socket_recvmsg, 0, sock, msg, size, flags);
1956 }
1957 
1958 int security_socket_getsockname(struct socket *sock)
1959 {
1960 	return call_int_hook(socket_getsockname, 0, sock);
1961 }
1962 
1963 int security_socket_getpeername(struct socket *sock)
1964 {
1965 	return call_int_hook(socket_getpeername, 0, sock);
1966 }
1967 
1968 int security_socket_getsockopt(struct socket *sock, int level, int optname)
1969 {
1970 	return call_int_hook(socket_getsockopt, 0, sock, level, optname);
1971 }
1972 
1973 int security_socket_setsockopt(struct socket *sock, int level, int optname)
1974 {
1975 	return call_int_hook(socket_setsockopt, 0, sock, level, optname);
1976 }
1977 
1978 int security_socket_shutdown(struct socket *sock, int how)
1979 {
1980 	return call_int_hook(socket_shutdown, 0, sock, how);
1981 }
1982 
1983 int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
1984 {
1985 	return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
1986 }
1987 EXPORT_SYMBOL(security_sock_rcv_skb);
1988 
1989 int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
1990 				      int __user *optlen, unsigned len)
1991 {
1992 	return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
1993 				optval, optlen, len);
1994 }
1995 
1996 int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
1997 {
1998 	return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
1999 			     skb, secid);
2000 }
2001 EXPORT_SYMBOL(security_socket_getpeersec_dgram);
2002 
2003 int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
2004 {
2005 	return call_int_hook(sk_alloc_security, 0, sk, family, priority);
2006 }
2007 
2008 void security_sk_free(struct sock *sk)
2009 {
2010 	call_void_hook(sk_free_security, sk);
2011 }
2012 
2013 void security_sk_clone(const struct sock *sk, struct sock *newsk)
2014 {
2015 	call_void_hook(sk_clone_security, sk, newsk);
2016 }
2017 EXPORT_SYMBOL(security_sk_clone);
2018 
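/*
 * Flow labelling helpers: the socket's (or request socket's) security
 * ID is copied into the flow key, which is what the xfrm policy and
 * state lookups further down match against via flowi_secid.
 */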
2019 void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
2020 {
2021 	call_void_hook(sk_getsecid, sk, &fl->flowi_secid);
2022 }
2023 EXPORT_SYMBOL(security_sk_classify_flow);
2024 
2025 void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
2026 {
2027 	call_void_hook(req_classify_flow, req, fl);
2028 }
2029 EXPORT_SYMBOL(security_req_classify_flow);
2030 
2031 void security_sock_graft(struct sock *sk, struct socket *parent)
2032 {
2033 	call_void_hook(sock_graft, sk, parent);
2034 }
2035 EXPORT_SYMBOL(security_sock_graft);
2036 
2037 int security_inet_conn_request(struct sock *sk,
2038 			struct sk_buff *skb, struct request_sock *req)
2039 {
2040 	return call_int_hook(inet_conn_request, 0, sk, skb, req);
2041 }
2042 EXPORT_SYMBOL(security_inet_conn_request);
2043 
2044 void security_inet_csk_clone(struct sock *newsk,
2045 			const struct request_sock *req)
2046 {
2047 	call_void_hook(inet_csk_clone, newsk, req);
2048 }
2049 
2050 void security_inet_conn_established(struct sock *sk,
2051 			struct sk_buff *skb)
2052 {
2053 	call_void_hook(inet_conn_established, sk, skb);
2054 }
2055 EXPORT_SYMBOL(security_inet_conn_established);
2056 
2057 int security_secmark_relabel_packet(u32 secid)
2058 {
2059 	return call_int_hook(secmark_relabel_packet, 0, secid);
2060 }
2061 EXPORT_SYMBOL(security_secmark_relabel_packet);
2062 
2063 void security_secmark_refcount_inc(void)
2064 {
2065 	call_void_hook(secmark_refcount_inc);
2066 }
2067 EXPORT_SYMBOL(security_secmark_refcount_inc);
2068 
2069 void security_secmark_refcount_dec(void)
2070 {
2071 	call_void_hook(secmark_refcount_dec);
2072 }
2073 EXPORT_SYMBOL(security_secmark_refcount_dec);
2074 
2075 int security_tun_dev_alloc_security(void **security)
2076 {
2077 	return call_int_hook(tun_dev_alloc_security, 0, security);
2078 }
2079 EXPORT_SYMBOL(security_tun_dev_alloc_security);
2080 
2081 void security_tun_dev_free_security(void *security)
2082 {
2083 	call_void_hook(tun_dev_free_security, security);
2084 }
2085 EXPORT_SYMBOL(security_tun_dev_free_security);
2086 
2087 int security_tun_dev_create(void)
2088 {
2089 	return call_int_hook(tun_dev_create, 0);
2090 }
2091 EXPORT_SYMBOL(security_tun_dev_create);
2092 
2093 int security_tun_dev_attach_queue(void *security)
2094 {
2095 	return call_int_hook(tun_dev_attach_queue, 0, security);
2096 }
2097 EXPORT_SYMBOL(security_tun_dev_attach_queue);
2098 
2099 int security_tun_dev_attach(struct sock *sk, void *security)
2100 {
2101 	return call_int_hook(tun_dev_attach, 0, sk, security);
2102 }
2103 EXPORT_SYMBOL(security_tun_dev_attach);
2104 
2105 int security_tun_dev_open(void *security)
2106 {
2107 	return call_int_hook(tun_dev_open, 0, security);
2108 }
2109 EXPORT_SYMBOL(security_tun_dev_open);
2110 
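/*
 * SCTP gets dedicated hooks because one endpoint can juggle multiple
 * addresses and associations: sctp_assoc_request vets an incoming
 * association, sctp_bind_connect covers the multi-address bind/connect
 * socket options, and sctp_sk_clone labels the socket created when an
 * association is peeled off.
 */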
2111 int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb)
2112 {
2113 	return call_int_hook(sctp_assoc_request, 0, ep, skb);
2114 }
2115 EXPORT_SYMBOL(security_sctp_assoc_request);
2116 
2117 int security_sctp_bind_connect(struct sock *sk, int optname,
2118 			       struct sockaddr *address, int addrlen)
2119 {
2120 	return call_int_hook(sctp_bind_connect, 0, sk, optname,
2121 			     address, addrlen);
2122 }
2123 EXPORT_SYMBOL(security_sctp_bind_connect);
2124 
2125 void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
2126 			    struct sock *newsk)
2127 {
2128 	call_void_hook(sctp_sk_clone, ep, sk, newsk);
2129 }
2130 EXPORT_SYMBOL(security_sctp_sk_clone);
2131 
2132 #endif	/* CONFIG_SECURITY_NETWORK */
2133 
2134 #ifdef CONFIG_SECURITY_INFINIBAND
2135 
2136 int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
2137 {
2138 	return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey);
2139 }
2140 EXPORT_SYMBOL(security_ib_pkey_access);
2141 
2142 int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
2143 {
2144 	return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num);
2145 }
2146 EXPORT_SYMBOL(security_ib_endport_manage_subnet);
2147 
2148 int security_ib_alloc_security(void **sec)
2149 {
2150 	return call_int_hook(ib_alloc_security, 0, sec);
2151 }
2152 EXPORT_SYMBOL(security_ib_alloc_security);
2153 
2154 void security_ib_free_security(void *sec)
2155 {
2156 	call_void_hook(ib_free_security, sec);
2157 }
2158 EXPORT_SYMBOL(security_ib_free_security);
2159 #endif	/* CONFIG_SECURITY_INFINIBAND */
2160 
2161 #ifdef CONFIG_SECURITY_NETWORK_XFRM
2162 
2163 int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
2164 			       struct xfrm_user_sec_ctx *sec_ctx,
2165 			       gfp_t gfp)
2166 {
2167 	return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
2168 }
2169 EXPORT_SYMBOL(security_xfrm_policy_alloc);
2170 
2171 int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
2172 			      struct xfrm_sec_ctx **new_ctxp)
2173 {
2174 	return call_int_hook(xfrm_policy_clone_security, 0, old_ctx, new_ctxp);
2175 }
2176 
2177 void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
2178 {
2179 	call_void_hook(xfrm_policy_free_security, ctx);
2180 }
2181 EXPORT_SYMBOL(security_xfrm_policy_free);
2182 
2183 int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
2184 {
2185 	return call_int_hook(xfrm_policy_delete_security, 0, ctx);
2186 }
2187 
2188 int security_xfrm_state_alloc(struct xfrm_state *x,
2189 			      struct xfrm_user_sec_ctx *sec_ctx)
2190 {
2191 	return call_int_hook(xfrm_state_alloc, 0, x, sec_ctx);
2192 }
2193 EXPORT_SYMBOL(security_xfrm_state_alloc);
2194 
2195 int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
2196 				      struct xfrm_sec_ctx *polsec, u32 secid)
2197 {
2198 	return call_int_hook(xfrm_state_alloc_acquire, 0, x, polsec, secid);
2199 }
2200 
2201 int security_xfrm_state_delete(struct xfrm_state *x)
2202 {
2203 	return call_int_hook(xfrm_state_delete_security, 0, x);
2204 }
2205 EXPORT_SYMBOL(security_xfrm_state_delete);
2206 
2207 void security_xfrm_state_free(struct xfrm_state *x)
2208 {
2209 	call_void_hook(xfrm_state_free_security, x);
2210 }
2211 
2212 int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
2213 {
2214 	return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid, dir);
2215 }
2216 
2217 int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
2218 				       struct xfrm_policy *xp,
2219 				       const struct flowi *fl)
2220 {
2221 	struct security_hook_list *hp;
2222 	int rc = 1;
2223 
2224 	/*
2225 	 * This function is expected to return 0 or 1, so combining verdicts
2226 	 * from multiple LSMs would be ambiguous.  Using the first LSM's
2227 	 * verdict is safe because only SELinux currently supplies this hook.
2228 	 *
2229 	 * As a small speed optimization the loop is broken explicitly after
2230 	 * the first entry instead of going through the call_int_hook()
2231 	 * macro.
2232 	 */
2233 	hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
2234 				list) {
2235 		rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl);
2236 		break;
2237 	}
2238 	return rc;
2239 }
2240 
2241 int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
2242 {
2243 	return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
2244 }
2245 
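/*
 * Same underlying hook as security_xfrm_decode_session() above, but
 * called with the trailing flag set to 0 and the result written straight
 * into the flow key (in SELinux, currently the only in-tree provider,
 * that flag selects whether every xfrm state in the skb's secpath must
 * agree on the secid).  A non-zero return here is treated as a kernel
 * bug rather than propagated, hence the BUG_ON().
 */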
2246 void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
2247 {
2248 	int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid,
2249 				0);
2250 
2251 	BUG_ON(rc);
2252 }
2253 EXPORT_SYMBOL(security_skb_classify_flow);
2254 
2255 #endif	/* CONFIG_SECURITY_NETWORK_XFRM */
2256 
2257 #ifdef CONFIG_KEYS
2258 
2259 int security_key_alloc(struct key *key, const struct cred *cred,
2260 		       unsigned long flags)
2261 {
2262 	return call_int_hook(key_alloc, 0, key, cred, flags);
2263 }
2264 
2265 void security_key_free(struct key *key)
2266 {
2267 	call_void_hook(key_free, key);
2268 }
2269 
2270 int security_key_permission(key_ref_t key_ref,
2271 			    const struct cred *cred, unsigned perm)
2272 {
2273 	return call_int_hook(key_permission, 0, key_ref, cred, perm);
2274 }
2275 
2276 int security_key_getsecurity(struct key *key, char **_buffer)
2277 {
2278 	*_buffer = NULL;
2279 	return call_int_hook(key_getsecurity, 0, key, _buffer);
2280 }
2281 
2282 #endif	/* CONFIG_KEYS */
2283 
2284 #ifdef CONFIG_AUDIT
2285 
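/*
 * Audit filtering glue: an LSM can back the subj_ and obj_ fields of
 * audit rules.  audit_rule_init parses the rule string into an
 * LSM-private blob, audit_rule_known reports whether a rule uses fields
 * the LSM understands, audit_rule_match compares a secid against the
 * rule, and audit_rule_free releases the blob.
 */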
2286 int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
2287 {
2288 	return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
2289 }
2290 
2291 int security_audit_rule_known(struct audit_krule *krule)
2292 {
2293 	return call_int_hook(audit_rule_known, 0, krule);
2294 }
2295 
2296 void security_audit_rule_free(void *lsmrule)
2297 {
2298 	call_void_hook(audit_rule_free, lsmrule);
2299 }
2300 
2301 int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
2302 			      struct audit_context *actx)
2303 {
2304 	return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule,
2305 				actx);
2306 }
2307 #endif /* CONFIG_AUDIT */
2308 
2309 #ifdef CONFIG_BPF_SYSCALL
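/*
 * The BPF hooks come in two flavours: access checks at syscall time
 * (security_bpf for the bpf() syscall itself, security_bpf_map and
 * security_bpf_prog for fd-based access to maps and programs) and
 * security-blob lifecycle management for maps and programs (the
 * *_alloc/*_free pairs below).
 */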
2310 int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
2311 {
2312 	return call_int_hook(bpf, 0, cmd, attr, size);
2313 }
2314 int security_bpf_map(struct bpf_map *map, fmode_t fmode)
2315 {
2316 	return call_int_hook(bpf_map, 0, map, fmode);
2317 }
2318 int security_bpf_prog(struct bpf_prog *prog)
2319 {
2320 	return call_int_hook(bpf_prog, 0, prog);
2321 }
2322 int security_bpf_map_alloc(struct bpf_map *map)
2323 {
2324 	return call_int_hook(bpf_map_alloc_security, 0, map);
2325 }
2326 int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
2327 {
2328 	return call_int_hook(bpf_prog_alloc_security, 0, aux);
2329 }
2330 void security_bpf_map_free(struct bpf_map *map)
2331 {
2332 	call_void_hook(bpf_map_free_security, map);
2333 }
2334 void security_bpf_prog_free(struct bpf_prog_aux *aux)
2335 {
2336 	call_void_hook(bpf_prog_free_security, aux);
2337 }
2338 #endif /* CONFIG_BPF_SYSCALL */
2339