Lines matching full:ss

21  * they define the ss->css_rstat_flush callback.
25 return css_is_self(css) || css->ss->css_rstat_flush != NULL; in css_uses_rstat()
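
A css "uses rstat" either because it is the self css or because its subsystem provides a flush callback. As a rough sketch (the controller name and callback body are hypothetical; only the css_rstat_flush field comes from the lines above), a controller opts in like this:

    /* hypothetical controller, for illustration only */
    static void foo_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
    {
        /* fold this CPU's pending deltas into css-wide counters */
    }

    struct cgroup_subsys foo_cgrp_subsys = {
        /* ... css_alloc/css_free and friends elided ... */
        .css_rstat_flush    = foo_css_rstat_flush,
    };
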
40 static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss) in ss_rstat_lock()
42 if (ss) in ss_rstat_lock()
43 return &ss->rstat_ss_lock; in ss_rstat_lock()
48 static inline struct llist_head *ss_lhead_cpu(struct cgroup_subsys *ss, int cpu) in ss_lhead_cpu()
50 if (ss) in ss_lhead_cpu()
51 return per_cpu_ptr(ss->lhead, cpu); in ss_lhead_cpu()
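
Only the @ss != NULL branches of these two helpers match the search; per the ss_rstat_init() comment further down, the NULL case falls back to static base-stat objects. A sketch of that implied fallback (the rstat_base_* identifiers are assumptions, not taken from the listing):

    static spinlock_t rstat_base_lock_sketch;
    static DEFINE_PER_CPU(struct llist_head, rstat_base_lhead_sketch);

    static spinlock_t *ss_rstat_lock_sketch(struct cgroup_subsys *ss)
    {
        if (ss)
            return &ss->rstat_ss_lock;
        return &rstat_base_lock_sketch;
    }

    static struct llist_head *ss_lhead_cpu_sketch(struct cgroup_subsys *ss, int cpu)
    {
        if (ss)
            return per_cpu_ptr(ss->lhead, cpu);
        return per_cpu_ptr(&rstat_base_lhead_sketch, cpu);
    }
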
60 * Atomically inserts the css in the ss's llist for the given cpu. This is
61  * reentrant safe, i.e. safe against softirq, hardirq and NMI. The ss's llist
121 lhead = ss_lhead_cpu(css->ss, cpu); in css_rstat_updated()
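
The update path above looks up the per-cpu llist head for the css's subsystem and then inserts the css lock-free. A sketch of that insertion (the lnode field name is an assumption; ss_lhead_cpu() and the css/cpu parameters come from the lines above):

    static void css_rstat_enqueue_sketch(struct cgroup_subsys_state *css, int cpu)
    {
        struct llist_head *lhead = ss_lhead_cpu(css->ss, cpu);

        /*
         * llist_add() is a lockless cmpxchg loop, which is what makes
         * this path safe from softirq, hardirq and NMI context, as the
         * comment above states.
         */
        llist_add(&css->rstat_lnode_sketch, lhead);  /* hypothetical llist_node field */
    }
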
154 static void css_process_update_tree(struct cgroup_subsys *ss, int cpu) in css_process_update_tree()
156 struct llist_head *lhead = ss_lhead_cpu(ss, cpu); in css_process_update_tree()
211 lockdep_assert_held(ss_rstat_lock(head->ss)); in css_rstat_push_children()
284 css_process_update_tree(root->ss, cpu); in css_rstat_updated_list()
357 __acquires(ss_rstat_lock(css->ss)) in __css_rstat_lock()
363 lock = ss_rstat_lock(css->ss); in __css_rstat_lock()
374 __releases(ss_rstat_lock(css->ss)) in __css_rstat_unlock()
379 lock = ss_rstat_lock(css->ss); in __css_rstat_unlock()
422 pos->ss->css_rstat_flush(pos, cpu); in css_rstat_flush()
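
Inside css_rstat_flush(), each visited css is handed to its subsystem's callback for the given CPU. On the consumer side, a controller would flush before reporting aggregated numbers; a sketch only, assuming css_rstat_flush(css) is the public entry point (the foo names and usage field are hypothetical):

    static u64 foo_usage_read(struct cgroup_subsys_state *css, struct cftype *cft)
    {
        struct foo_css *fc = css_to_foo_css(css);   /* hypothetical container_of helper */

        css_rstat_flush(css);                       /* drives the per-cpu callback above */
        return READ_ONCE(fc->usage_total);
    }
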
443 } else if (css->ss->css_rstat_flush == NULL) in css_rstat_init()
509 * @ss: target subsystem
511 * If @ss is NULL, the static locks associated with the base stats
512 * are initialized. If @ss is non-NULL, the subsystem-specific locks
515 int __init ss_rstat_init(struct cgroup_subsys *ss) in ss_rstat_init()
519 if (ss) { in ss_rstat_init()
520 ss->lhead = alloc_percpu(struct llist_head); in ss_rstat_init()
521 if (!ss->lhead) in ss_rstat_init()
525 spin_lock_init(ss_rstat_lock(ss)); in ss_rstat_init()
527 init_llist_head(ss_lhead_cpu(ss, cpu)); in ss_rstat_init()
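
Taken together, the matched lines imply the following init flow. This is a sketch only; the per-cpu loop and the error value are assumptions, while the individual calls appear on lines 519-527 above:

    int __init ss_rstat_init_sketch(struct cgroup_subsys *ss)
    {
        int cpu;

        if (ss) {
            /* per-subsystem per-cpu llist heads, one per CPU */
            ss->lhead = alloc_percpu(struct llist_head);
            if (!ss->lhead)
                return -ENOMEM;
        }

        spin_lock_init(ss_rstat_lock(ss));
        for_each_possible_cpu(cpu)
            init_llist_head(ss_lhead_cpu(ss, cpu));

        return 0;
    }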