Lines matching refs:ss
21 * they define the ss->css_rstat_flush callback.
25 return css_is_self(css) || css->ss->css_rstat_flush != NULL;
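
For context, a controller opts into rstat by filling in the css_rstat_flush callback of its struct cgroup_subsys; the check above then treats its css objects as rstat users (the memory controller, for instance, wires up mem_cgroup_css_rstat_flush this way). A minimal sketch of such a hookup, where all "my_*" names are hypothetical stand-ins:

/*
 * Hypothetical controller opting into rstat. Only the css_rstat_flush
 * member matters for css_uses_rstat(); a real controller also fills in
 * css_alloc/css_free and friends.
 */
static void my_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	/* fold this CPU's per-cpu deltas into the css-wide totals */
}

struct cgroup_subsys my_cgrp_subsys = {
	.css_rstat_flush = my_css_rstat_flush,	/* makes css_uses_rstat() true */
};
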
40 static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
42 if (ss)
43 return &ss->rstat_ss_lock;
48 static inline struct llist_head *ss_lhead_cpu(struct cgroup_subsys *ss, int cpu)
50 if (ss)
51 return per_cpu_ptr(ss->lhead, cpu);
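
Both helpers above pick the subsystem-private object when @ss is non-NULL and otherwise fall back to the statics used for the cgroup's own ("self") base stats. A sketch of the full pair, assuming the fallbacks are file-scope statics named rstat_base_lock and rstat_backlog_list (those names do not appear in the matches above, so treat them as assumptions):

static DEFINE_SPINLOCK(rstat_base_lock);			/* assumed name */
static DEFINE_PER_CPU(struct llist_head, rstat_backlog_list);	/* assumed name */

static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
{
	if (ss)
		return &ss->rstat_ss_lock;	/* per-subsystem flush lock */
	return &rstat_base_lock;		/* base stats of the cgroup itself */
}

static inline struct llist_head *ss_lhead_cpu(struct cgroup_subsys *ss, int cpu)
{
	if (ss)
		return per_cpu_ptr(ss->lhead, cpu);	/* subsystem's per-cpu llist */
	return per_cpu_ptr(&rstat_backlog_list, cpu);	/* base-stat per-cpu llist */
}
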
60 * Atomically inserts the css in the ss's llist for the given cpu. This is
61 * reentrant safe i.e. safe against softirq, hardirq and nmi. The ss's llist
124 lhead = ss_lhead_cpu(css->ss, cpu);
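
The comment above is the heart of the update side: marking a css as updated may happen in any context, including NMI, so it must be lock-free. A sketch of that pattern in the context of the rstat code, where struct my_rstat_cpu and the other "my_*" names are hypothetical stand-ins for the real per-cpu bookkeeping:

/* Hypothetical per-cpu bookkeeping; nodes start off-list via init_llist_node(). */
struct my_rstat_cpu {
	struct llist_node lnode;	/* entry in this CPU's update llist */
	struct cgroup_subsys_state *owner;
	/* ... per-cpu counters ... */
};

/*
 * A node that is not queued points at itself (init_llist_node()), so a
 * cmpxchg from "self" to NULL elects exactly one context to perform the
 * llist_add(); a nested irq/NMI caller that interrupts the winner loses
 * the cmpxchg and returns, which is safe because the winner's single add
 * covers that update too. (The real code may use a per-cpu cmpxchg
 * variant; a plain cmpxchg keeps the sketch short.)
 */
static void my_css_rstat_updated(struct cgroup_subsys_state *css,
				 struct my_rstat_cpu *rstatc, int cpu)
{
	struct llist_node *self = &rstatc->lnode;

	if (llist_on_list(&rstatc->lnode))	/* already queued on this CPU */
		return;

	if (cmpxchg(&rstatc->lnode.next, self, NULL) != self)
		return;				/* a nested context won the race */

	llist_add(&rstatc->lnode, ss_lhead_cpu(css->ss, cpu));
}
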
157 static void css_process_update_tree(struct cgroup_subsys *ss, int cpu)
159 struct llist_head *lhead = ss_lhead_cpu(ss, cpu);
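
At flush time each CPU's llist is drained and every queued entry is folded into that CPU's update tree. A sketch of the draining pattern, reusing the hypothetical struct my_rstat_cpu from the previous sketch; the tree-insertion helper is likewise a hypothetical stand-in:

/* Hypothetical: link rstatc->owner into this CPU's updated-css tree. */
static void my_add_to_updated_tree(struct my_rstat_cpu *rstatc, int cpu);

static void my_process_update_tree(struct cgroup_subsys *ss, int cpu)
{
	struct llist_head *lhead = ss_lhead_cpu(ss, cpu);
	struct llist_node *list = llist_del_all(lhead);	/* detach whole list atomically */
	struct my_rstat_cpu *rstatc, *tmp;

	llist_for_each_entry_safe(rstatc, tmp, list, lnode) {
		/* re-arm the node so later updates can queue this css again */
		init_llist_node(&rstatc->lnode);
		my_add_to_updated_tree(rstatc, cpu);
	}
}
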
214 lockdep_assert_held(ss_rstat_lock(head->ss));
287 css_process_update_tree(root->ss, cpu);
360 __acquires(ss_rstat_lock(css->ss))
366 lock = ss_rstat_lock(css->ss);
377 __releases(ss_rstat_lock(css->ss))
382 lock = ss_rstat_lock(css->ss);
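
The flush path brackets its work with a lock/unlock helper pair so that sparse's __acquires()/__releases() context tracking and the lockdep_assert_held() check matched above both name the same per-subsystem lock. A sketch of that pattern, assuming the plain IRQ-disabling spinlock variant; the real helpers may add contention tracing:

static void my_css_rstat_lock(struct cgroup_subsys_state *css, int cpu)
	__acquires(ss_rstat_lock(css->ss))
{
	spinlock_t *lock = ss_rstat_lock(css->ss);

	spin_lock_irq(lock);
}

static void my_css_rstat_unlock(struct cgroup_subsys_state *css, int cpu)
	__releases(ss_rstat_lock(css->ss))
{
	spinlock_t *lock = ss_rstat_lock(css->ss);

	spin_unlock_irq(lock);
}
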
425 pos->ss->css_rstat_flush(pos, cpu);
446 } else if (css->ss->css_rstat_flush == NULL)
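
During a flush, the per-cpu updated-css list is walked under that lock: a "self" css carries the cgroup's own base stats, anything else is dispatched to its subsystem's css_rstat_flush callback, and (per the NULL check above) a css whose subsystem defines no callback is never flushed through this path. A sketch of the dispatch, using the lock helpers sketched above; the list-walk helpers are hypothetical stand-ins for the real updated-tree walkers:

/* Hypothetical stand-ins for the real updated-tree walkers and base flush. */
static struct cgroup_subsys_state *my_updated_list(struct cgroup_subsys_state *css, int cpu);
static struct cgroup_subsys_state *my_updated_next(struct cgroup_subsys_state *pos);
static void my_flush_base_stats(struct cgroup *cgrp, int cpu);

static void my_rstat_flush(struct cgroup_subsys_state *css)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cgroup_subsys_state *pos;

		my_css_rstat_lock(css, cpu);		/* from the sketch above */
		for (pos = my_updated_list(css, cpu); pos; pos = my_updated_next(pos)) {
			if (css_is_self(pos))
				my_flush_base_stats(pos->cgroup, cpu);
			else
				pos->ss->css_rstat_flush(pos, cpu);
		}
		my_css_rstat_unlock(css, cpu);
	}
}
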
512 * @ss: target subsystem
514 * If @ss is NULL, the static locks associated with the base stats
515 * are initialized. If @ss is non-NULL, the subsystem-specific locks
518 int __init ss_rstat_init(struct cgroup_subsys *ss)
522 if (ss) {
523 ss->lhead = alloc_percpu(struct llist_head);
524 if (!ss->lhead)
528 spin_lock_init(ss_rstat_lock(ss));
530 init_llist_head(ss_lhead_cpu(ss, cpu));
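
Initialization allocates per-cpu llist heads only for a real subsystem (the base stats fall back to static storage via the NULL-aware helpers above) and then sets up the chosen lock and every CPU's llist head. A sketch of how boot code would presumably invoke it, once for the base stats and once per rstat-using subsystem; the exact call sites and conditions in cgroup_init() are assumptions:

static int __init my_rstat_boot_init(void)
{
	struct cgroup_subsys *ss;
	int ssid;

	BUG_ON(ss_rstat_init(NULL));		/* base-stat lock + per-cpu llists */

	for_each_subsys(ss, ssid) {
		if (ss->css_rstat_flush)	/* only rstat users need private state */
			BUG_ON(ss_rstat_init(ss));
	}
	return 0;
}
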