--- netnode.c (b7d3622a39fde7658170b7f3cf6c6889bb8db30d)
+++ netnode.c (615e51fdda6f274e94b1e905fcaf6111e0d9aa20)
 /*
  * Network node table
  *
  * SELinux must keep a mapping of network nodes to labels/SIDs. This
  * mapping is maintained as part of the normal policy but a fast cache is
  * needed to reduce the lookup overhead since most of these queries happen on
  * a per-packet basis.
  *

[... 269 unchanged lines hidden ...]

 /**
  * sel_netnode_flush - Flush the entire network address table
  *
  * Description:
  * Remove all entries from the network address table.
  *
  */
-static void sel_netnode_flush(void)
+void sel_netnode_flush(void)
 {
         unsigned int idx;
         struct sel_netnode *node, *node_tmp;

         spin_lock_bh(&sel_netnode_lock);
         for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++) {
                 list_for_each_entry_safe(node, node_tmp,
                                          &sel_netnode_hash[idx].list, list) {
                         list_del_rcu(&node->list);
                         kfree_rcu(node, rcu);
                 }
                 sel_netnode_hash[idx].size = 0;
         }
         spin_unlock_bh(&sel_netnode_lock);
 }

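The only change to the function itself is the dropped static qualifier, so sel_netnode_flush() is no longer private to netnode.c and other SELinux code can call it directly rather than relying on a callback registered in this file. A minimal sketch of the matching declaration, assuming it lives in an internal SELinux header; the header path and guard name are illustrative, not taken from this diff:

    /* security/selinux/include/netnode.h: assumed location of the prototype */
    #ifndef _SELINUX_NETNODE_H
    #define _SELINUX_NETNODE_H

    void sel_netnode_flush(void);

    #endif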
-static int sel_netnode_avc_callback(u32 event)
-{
-        if (event == AVC_CALLBACK_RESET) {
-                sel_netnode_flush();
-                synchronize_net();
-        }
-        return 0;
-}
-
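The old side registered a per-file AVC reset callback that flushed this cache and then called synchronize_net(); the new side drops it. The flush presumably still runs on AVC_CALLBACK_RESET, just from a single handler elsewhere that resets all of the SELinux network caches at once. A sketch of what such a consolidated handler could look like; apart from sel_netnode_flush() and synchronize_net(), every name below is an assumption rather than something shown in this diff:

    /* Hypothetical consolidated reset handler; the sibling flush helpers and
     * the function name are assumptions. */
    static int selinux_netcache_avc_callback(u32 event)
    {
            if (event == AVC_CALLBACK_RESET) {
                    sel_netif_flush();      /* assumed interface-cache flush */
                    sel_netnode_flush();    /* the function made non-static above */
                    sel_netport_flush();    /* assumed port-cache flush */
                    synchronize_net();
            }
            return 0;
    }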
 static __init int sel_netnode_init(void)
 {
         int iter;
         int ret;

         if (!selinux_enabled)
                 return 0;

         for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
                 INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
                 sel_netnode_hash[iter].size = 0;
         }

-        ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET);
-        if (ret != 0)
-                panic("avc_add_callback() failed, error %d\n", ret);
-
         return ret;
 }

 __initcall(sel_netnode_init);
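With the avc_add_callback() registration gone, sel_netnode_init() only sets up the hash buckets, and in the new column nothing assigns ret before return ret is reached, so the function returns an indeterminate value. Assuming no assignment was meant to remain, the tidier form would drop the variable and return 0 directly; a sketch of that cleanup, suggested here rather than taken from the diff:

    /* Possible cleanup of the init routine once ret is unused; this is a
     * suggestion, not what the diff shows. */
    static __init int sel_netnode_init(void)
    {
            int iter;

            if (!selinux_enabled)
                    return 0;

            for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
                    INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
                    sel_netnode_hash[iter].size = 0;
            }

            return 0;
    }

The single registration for the consolidated reset handler would presumably move next to that handler, mirroring the avc_add_callback() call removed above.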