xref: /linux/kernel/cgroup/dmem.c (revision f96a974170b749e3a56844e25b31d46a7233b6f6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2023-2024 Intel Corporation (Maarten Lankhorst <dev@lankhorst.se>)
4  * Copyright 2024 Red Hat (Maxime Ripard <mripard@kernel.org>)
5  * Partially based on the rdma and misc controllers, which bear the following copyrights:
6  *
7  * Copyright 2020 Google LLC
8  * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
9  */
10 
11 #include <linux/cgroup.h>
12 #include <linux/cgroup_dmem.h>
13 #include <linux/list.h>
14 #include <linux/mutex.h>
15 #include <linux/page_counter.h>
16 #include <linux/parser.h>
17 #include <linux/slab.h>
18 
19 struct dmem_cgroup_region {
20 	/**
21 	 * @ref: References keeping the region alive.
22 	 * Keeps the region reference alive after a successful RCU lookup.
23 	 */
24 	struct kref ref;
25 
26 	/** @rcu: RCU head for freeing */
27 	struct rcu_head rcu;
28 
29 	/**
30 	 * @region_node: Linked into &dmem_cgroup_regions list.
31 	 * Protected by RCU and global spinlock.
32 	 */
33 	struct list_head region_node;
34 
35 	/**
36 	 * @pools: List of pools linked to this region.
37 	 * Protected by global spinlock only
38 	 */
39 	struct list_head pools;
40 
41 	/** @size: Size of region, in bytes */
42 	u64 size;
43 
44 	/** @name: Name describing the region, set by dmem_cgroup_register_region() */
45 	char *name;
46 
47 	/**
48 	 * @unregistered: Whether the region has been unregistered by its caller.
49 	 * No new pools should be added to the region afterwards.
50 	 */
51 	bool unregistered;
52 };
53 
54 struct dmemcg_state {
55 	struct cgroup_subsys_state css;
56 
57 	struct list_head pools;
58 };
59 
60 struct dmem_cgroup_pool_state {
61 	struct dmem_cgroup_region *region;
62 	struct dmemcg_state *cs;
63 
64 	/* css node, RCU protected against region teardown */
65 	struct list_head	css_node;
66 
67 	/* dev node, no RCU protection required */
68 	struct list_head	region_node;
69 
70 	struct rcu_head rcu;
71 
72 	struct page_counter cnt;
73 
74 	bool inited;
75 };
76 
77 /*
78  * Three operations require locking protection:
79  * - Registering and unregistering a region to/from the region list requires the global lock.
80  * - Adding a dmem_cgroup_pool_state to a CSS, and removing it when the CSS is freed.
81  * - Adding a dmem_cgroup_pool_state to a region's pool list.
82  *
83  * Since RCU provides enough protection for the most common operations, more
84  * granular locking does not seem worthwhile. Most of the protection comes
85  * from RCU and the locklessly operating page_counter.
86  */
87 static DEFINE_SPINLOCK(dmemcg_lock);
88 static LIST_HEAD(dmem_cgroup_regions);
89 
90 static inline struct dmemcg_state *
91 css_to_dmemcs(struct cgroup_subsys_state *css)
92 {
93 	return container_of(css, struct dmemcg_state, css);
94 }
95 
96 static inline struct dmemcg_state *get_current_dmemcs(void)
97 {
98 	return css_to_dmemcs(task_get_css(current, dmem_cgrp_id));
99 }
100 
101 static struct dmemcg_state *parent_dmemcs(struct dmemcg_state *cg)
102 {
103 	return cg->css.parent ? css_to_dmemcs(cg->css.parent) : NULL;
104 }
105 
106 static void free_cg_pool(struct dmem_cgroup_pool_state *pool)
107 {
108 	list_del(&pool->region_node);
109 	kfree(pool);
110 }
111 
112 static void
113 set_resource_min(struct dmem_cgroup_pool_state *pool, u64 val)
114 {
115 	page_counter_set_min(&pool->cnt, val);
116 }
117 
118 static void
119 set_resource_low(struct dmem_cgroup_pool_state *pool, u64 val)
120 {
121 	page_counter_set_low(&pool->cnt, val);
122 }
123 
124 static void
125 set_resource_max(struct dmem_cgroup_pool_state *pool, u64 val)
126 {
127 	page_counter_set_max(&pool->cnt, val);
128 }
129 
130 static u64 get_resource_low(struct dmem_cgroup_pool_state *pool)
131 {
132 	return pool ? READ_ONCE(pool->cnt.low) : 0;
133 }
134 
135 static u64 get_resource_min(struct dmem_cgroup_pool_state *pool)
136 {
137 	return pool ? READ_ONCE(pool->cnt.min) : 0;
138 }
139 
140 static u64 get_resource_max(struct dmem_cgroup_pool_state *pool)
141 {
142 	return pool ? READ_ONCE(pool->cnt.max) : PAGE_COUNTER_MAX;
143 }
144 
145 static u64 get_resource_current(struct dmem_cgroup_pool_state *pool)
146 {
147 	return pool ? page_counter_read(&pool->cnt) : 0;
148 }
149 
150 static void reset_all_resource_limits(struct dmem_cgroup_pool_state *rpool)
151 {
152 	set_resource_min(rpool, 0);
153 	set_resource_low(rpool, 0);
154 	set_resource_max(rpool, PAGE_COUNTER_MAX);
155 }
156 
157 static void dmemcs_offline(struct cgroup_subsys_state *css)
158 {
159 	struct dmemcg_state *dmemcs = css_to_dmemcs(css);
160 	struct dmem_cgroup_pool_state *pool;
161 
162 	rcu_read_lock();
163 	list_for_each_entry_rcu(pool, &dmemcs->pools, css_node)
164 		reset_all_resource_limits(pool);
165 	rcu_read_unlock();
166 }
167 
168 static void dmemcs_free(struct cgroup_subsys_state *css)
169 {
170 	struct dmemcg_state *dmemcs = css_to_dmemcs(css);
171 	struct dmem_cgroup_pool_state *pool, *next;
172 
173 	spin_lock(&dmemcg_lock);
174 	list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) {
175 		/*
176 		 * The pool is dead and all references are 0,
177 		 * no need for RCU protection with list_del_rcu or freeing.
178 		 */
179 		list_del(&pool->css_node);
180 		free_cg_pool(pool);
181 	}
182 	spin_unlock(&dmemcg_lock);
183 
184 	kfree(dmemcs);
185 }
186 
187 static struct cgroup_subsys_state *
188 dmemcs_alloc(struct cgroup_subsys_state *parent_css)
189 {
190 	struct dmemcg_state *dmemcs = kzalloc(sizeof(*dmemcs), GFP_KERNEL);
191 	if (!dmemcs)
192 		return ERR_PTR(-ENOMEM);
193 
194 	INIT_LIST_HEAD(&dmemcs->pools);
195 	return &dmemcs->css;
196 }
197 
198 static struct dmem_cgroup_pool_state *
199 find_cg_pool_locked(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *region)
200 {
201 	struct dmem_cgroup_pool_state *pool;
202 
203 	list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock))
204 		if (pool->region == region)
205 			return pool;
206 
207 	return NULL;
208 }
209 
210 static struct dmem_cgroup_pool_state *pool_parent(struct dmem_cgroup_pool_state *pool)
211 {
212 	if (!pool->cnt.parent)
213 		return NULL;
214 
215 	return container_of(pool->cnt.parent, typeof(*pool), cnt);
216 }
217 
218 static void
219 dmem_cgroup_calculate_protection(struct dmem_cgroup_pool_state *limit_pool,
220 				 struct dmem_cgroup_pool_state *test_pool)
221 {
222 	struct page_counter *climit;
223 	struct cgroup_subsys_state *css, *next_css;
224 	struct dmemcg_state *dmemcg_iter;
225 	struct dmem_cgroup_pool_state *pool, *parent_pool;
226 	bool found_descendant;
227 
228 	climit = &limit_pool->cnt;
229 
230 	rcu_read_lock();
231 	parent_pool = pool = limit_pool;
232 	css = &limit_pool->cs->css;
233 
234 	/*
235 	 * This logic is roughly equivalent to css_foreach_descendant_pre,
236 	 * except we also track the parent pool to find out which pool we need
237 	 * to calculate protection values for.
238 	 *
239 	 * We can stop the traversal once we find test_pool among the
240 	 * descendants since we don't really care about any others.
241 	 */
242 	while (pool != test_pool) {
243 		next_css = css_next_child(NULL, css);
244 		if (next_css) {
245 			parent_pool = pool;
246 		} else {
247 			while (css != &limit_pool->cs->css) {
248 				next_css = css_next_child(css, css->parent);
249 				if (next_css)
250 					break;
251 				css = css->parent;
252 				parent_pool = pool_parent(parent_pool);
253 			}
254 			/*
255 			 * We can only hit this when test_pool is not a
256 			 * descendant of limit_pool.
257 			 */
258 			if (WARN_ON_ONCE(css == &limit_pool->cs->css))
259 				break;
260 		}
261 		css = next_css;
262 
263 		found_descendant = false;
264 		dmemcg_iter = container_of(css, struct dmemcg_state, css);
265 
266 		list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {
267 			if (pool_parent(pool) == parent_pool) {
268 				found_descendant = true;
269 				break;
270 			}
271 		}
272 		if (!found_descendant)
273 			continue;
274 
275 		page_counter_calculate_protection(
276 			climit, &pool->cnt, true);
277 	}
278 	rcu_read_unlock();
279 }
280 
281 /**
282  * dmem_cgroup_state_evict_valuable() - Check if we should evict from test_pool
283  * @limit_pool: The pool for which we hit limits
284  * @test_pool: The pool for which to test
285  * @ignore_low: Whether to ignore the low watermark.
286  * @ret_hit_low: Set to true when it makes sense to retry with @ignore_low set.
287  *
288  * This function returns true if we can evict from @test_pool, false if not.
289  * When returning false and @ignore_low is false, @ret_hit_low may
290  * be set to true to indicate this function can be retried with @ignore_low
291  * set to true.
292  *
293  * Return: %true if eviction from @test_pool is allowed, %false otherwise.
294  */
295 bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
296 				      struct dmem_cgroup_pool_state *test_pool,
297 				      bool ignore_low, bool *ret_hit_low)
298 {
299 	struct dmem_cgroup_pool_state *pool = test_pool;
300 	struct page_counter *ctest;
301 	u64 used, min, low;
302 
303 	/* Can always evict from current pool, despite limits */
304 	if (limit_pool == test_pool)
305 		return true;
306 
307 	if (limit_pool) {
308 		if (!parent_dmemcs(limit_pool->cs))
309 			return true;
310 
311 		for (pool = test_pool; pool && limit_pool != pool; pool = pool_parent(pool))
312 			{}
313 
314 		if (!pool)
315 			return false;
316 	} else {
317 		/*
318 		 * If there is no cgroup limiting memory usage, use the root
319 		 * cgroup instead for limit calculations.
320 		 */
321 		for (limit_pool = test_pool; pool_parent(limit_pool); limit_pool = pool_parent(limit_pool))
322 			{}
323 	}
324 
325 	ctest = &test_pool->cnt;
326 
327 	dmem_cgroup_calculate_protection(limit_pool, test_pool);
328 
329 	used = page_counter_read(ctest);
330 	min = READ_ONCE(ctest->emin);
331 
332 	if (used <= min)
333 		return false;
334 
335 	if (!ignore_low) {
336 		low = READ_ONCE(ctest->elow);
337 		if (used > low)
338 			return true;
339 
340 		*ret_hit_low = true;
341 		return false;
342 	}
343 	return true;
344 }
345 EXPORT_SYMBOL_GPL(dmem_cgroup_state_evict_valuable);
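
/*
 * Illustrative sketch, not part of the original file: one way a driver's
 * eviction path might use dmem_cgroup_state_evict_valuable() together with
 * the limiting pool returned by dmem_cgroup_try_charge(). The buffer
 * structure and the my_driver_evict_one() helper are hypothetical.
 *
 *	static bool my_driver_evict_something(struct dmem_cgroup_pool_state *limit_pool,
 *					      struct list_head *lru)
 *	{
 *		struct my_buffer *buf;
 *		bool ignore_low = false, hit_low = false;
 *
 *	retry:
 *		list_for_each_entry(buf, lru, lru_node) {
 *			if (!dmem_cgroup_state_evict_valuable(limit_pool, buf->pool,
 *							      ignore_low, &hit_low))
 *				continue;
 *			return my_driver_evict_one(buf);
 *		}
 *
 *		if (!ignore_low && hit_low) {
 *			// No candidate above its low watermark was found; retry,
 *			// now allowing eviction below the low (but not min) mark.
 *			ignore_low = true;
 *			goto retry;
 *		}
 *		return false;
 *	}
 */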
346 
347 static struct dmem_cgroup_pool_state *
348 alloc_pool_single(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *region,
349 		  struct dmem_cgroup_pool_state **allocpool)
350 {
351 	struct dmemcg_state *parent = parent_dmemcs(dmemcs);
352 	struct dmem_cgroup_pool_state *pool, *ppool = NULL;
353 
354 	if (!*allocpool) {
355 		pool = kzalloc(sizeof(*pool), GFP_NOWAIT);
356 		if (!pool)
357 			return ERR_PTR(-ENOMEM);
358 	} else {
359 		pool = *allocpool;
360 		*allocpool = NULL;
361 	}
362 
363 	pool->region = region;
364 	pool->cs = dmemcs;
365 
366 	if (parent)
367 		ppool = find_cg_pool_locked(parent, region);
368 
369 	page_counter_init(&pool->cnt,
370 			  ppool ? &ppool->cnt : NULL, true);
371 	reset_all_resource_limits(pool);
372 
373 	list_add_tail_rcu(&pool->css_node, &dmemcs->pools);
374 	list_add_tail(&pool->region_node, &region->pools);
375 
376 	if (!parent)
377 		pool->inited = true;
378 	else
379 		pool->inited = ppool ? ppool->inited : false;
380 	return pool;
381 }
382 
383 static struct dmem_cgroup_pool_state *
384 get_cg_pool_locked(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *region,
385 		   struct dmem_cgroup_pool_state **allocpool)
386 {
387 	struct dmem_cgroup_pool_state *pool, *ppool, *retpool;
388 	struct dmemcg_state *p, *pp;
389 
390 	/*
391 	 * Recursively create the pools; they may not be fully initialized
392 	 * during the recursion, that is done as a separate step afterwards.
393 	 */
394 	for (p = dmemcs; p; p = parent_dmemcs(p)) {
395 		pool = find_cg_pool_locked(p, region);
396 		if (!pool)
397 			pool = alloc_pool_single(p, region, allocpool);
398 
399 		if (IS_ERR(pool))
400 			return pool;
401 
402 		if (p == dmemcs && pool->inited)
403 			return pool;
404 
405 		if (pool->inited)
406 			break;
407 	}
408 
409 	retpool = pool = find_cg_pool_locked(dmemcs, region);
410 	for (p = dmemcs, pp = parent_dmemcs(dmemcs); pp; p = pp, pp = parent_dmemcs(p)) {
411 		if (pool->inited)
412 			break;
413 
414 		/* ppool was created by the loop above if it didn't already exist. */
415 		ppool = find_cg_pool_locked(pp, region);
416 
417 		/* Fix up parent links, mark as inited. */
418 		pool->cnt.parent = &ppool->cnt;
419 		pool->inited = true;
420 
421 		pool = ppool;
422 	}
423 
424 	return retpool;
425 }
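
/*
 * Illustrative walk-through of get_cg_pool_locked() above, not part of the
 * original file. For a hierarchy root -> A -> B where only root already has
 * an initialized pool for the region, a lookup for B first walks upwards,
 * allocating (uninitialized) pools for B and A until it hits root's
 * initialized pool, then walks upwards once more to wire up the parent page
 * counters and mark the new pools initialized. The dmemcs_B/prealloc names
 * are hypothetical:
 *
 *	pool_B = get_cg_pool_locked(dmemcs_B, region, &prealloc);
 *	// pool_B->cnt.parent == &pool_A->cnt
 *	// pool_A->cnt.parent == &pool_root->cnt
 *	// pool_B->inited and pool_A->inited are both true
 */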
426 
427 static void dmemcg_free_rcu(struct rcu_head *rcu)
428 {
429 	struct dmem_cgroup_region *region = container_of(rcu, typeof(*region), rcu);
430 	struct dmem_cgroup_pool_state *pool, *next;
431 
432 	list_for_each_entry_safe(pool, next, &region->pools, region_node)
433 		free_cg_pool(pool);
434 	kfree(region->name);
435 	kfree(region);
436 }
437 
438 static void dmemcg_free_region(struct kref *ref)
439 {
440 	struct dmem_cgroup_region *cgregion = container_of(ref, typeof(*cgregion), ref);
441 
442 	call_rcu(&cgregion->rcu, dmemcg_free_rcu);
443 }
444 
445 /**
446  * dmem_cgroup_unregister_region() - Unregister a previously registered region.
447  * @region: The region to unregister.
448  *
449  * This function undoes dmem_cgroup_register_region().
450  */
451 void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region)
452 {
453 	struct list_head *entry;
454 
455 	if (!region)
456 		return;
457 
458 	spin_lock(&dmemcg_lock);
459 
460 	/* Remove from global region list */
461 	list_del_rcu(&region->region_node);
462 
463 	list_for_each_rcu(entry, &region->pools) {
464 		struct dmem_cgroup_pool_state *pool =
465 			container_of(entry, typeof(*pool), region_node);
466 
467 		list_del_rcu(&pool->css_node);
468 	}
469 
470 	/*
471 	 * Ensure any RCU based lookups fail. Additionally,
472 	 * no new pools should be added to the dead region
473 	 * by get_cg_pool_unlocked.
474 	 */
475 	region->unregistered = true;
476 	spin_unlock(&dmemcg_lock);
477 
478 	kref_put(&region->ref, dmemcg_free_region);
479 }
480 EXPORT_SYMBOL_GPL(dmem_cgroup_unregister_region);
481 
482 /**
483  * dmem_cgroup_register_region() - Register a region with the dmem cgroup.
484  * @size: Size of the region to register, in bytes.
485  * @fmt: Printf-style format string (and arguments) used to name the region.
486  *
487  * This function registers a region with the dmem cgroup controller under the
488  * given name. After calling this function, the region can be used to charge
489  * allocations.
490  *
491  * Return: NULL if @size is zero, a valid pointer on success, or an ERR_PTR() on failure.
492  */
493 struct dmem_cgroup_region *dmem_cgroup_register_region(u64 size, const char *fmt, ...)
494 {
495 	struct dmem_cgroup_region *ret;
496 	char *region_name;
497 	va_list ap;
498 
499 	if (!size)
500 		return NULL;
501 
502 	va_start(ap, fmt);
503 	region_name = kvasprintf(GFP_KERNEL, fmt, ap);
504 	va_end(ap);
505 	if (!region_name)
506 		return ERR_PTR(-ENOMEM);
507 
508 	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
509 	if (!ret) {
510 		kfree(region_name);
511 		return ERR_PTR(-ENOMEM);
512 	}
513 
514 	INIT_LIST_HEAD(&ret->pools);
515 	ret->name = region_name;
516 	ret->size = size;
517 	kref_init(&ret->ref);
518 
519 	spin_lock(&dmemcg_lock);
520 	list_add_tail_rcu(&ret->region_node, &dmem_cgroup_regions);
521 	spin_unlock(&dmemcg_lock);
522 
523 	return ret;
524 }
525 EXPORT_SYMBOL_GPL(dmem_cgroup_register_region);
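
/*
 * Illustrative sketch, not part of the original file: typical region
 * lifecycle in a driver. The "my-driver/%s/vram" naming is hypothetical;
 * drivers choose their own format string.
 *
 *	struct dmem_cgroup_region *region;
 *
 *	region = dmem_cgroup_register_region(vram_size, "my-driver/%s/vram",
 *					     dev_name(dev));
 *	if (IS_ERR(region))
 *		return PTR_ERR(region);
 *	// NULL (returned when vram_size is 0) is not an error;
 *	// dmem_cgroup_unregister_region() accepts NULL as well.
 *
 *	// ... charge allocations with dmem_cgroup_try_charge() ...
 *
 *	dmem_cgroup_unregister_region(region);	// on device removal
 */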
526 
527 static struct dmem_cgroup_region *dmemcg_get_region_by_name(const char *name)
528 {
529 	struct dmem_cgroup_region *region;
530 
531 	list_for_each_entry_rcu(region, &dmem_cgroup_regions, region_node, spin_is_locked(&dmemcg_lock))
532 		if (!strcmp(name, region->name) &&
533 		    kref_get_unless_zero(&region->ref))
534 			return region;
535 
536 	return NULL;
537 }
538 
539 /**
540  * dmem_cgroup_pool_state_put() - Drop a reference to a dmem_cgroup_pool_state
541  * @pool: &dmem_cgroup_pool_state
542  *
543  * Called to drop a reference to the limiting pool returned by
544  * dmem_cgroup_try_charge().
545  */
546 void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
547 {
548 	if (pool)
549 		css_put(&pool->cs->css);
550 }
551 EXPORT_SYMBOL_GPL(dmem_cgroup_pool_state_put);
552 
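/*
 * Look up, and if necessary create, the pool for @region in @cg.
 *
 * The fast path is a pure RCU lookup of an already initialized pool. On a
 * miss the pool hierarchy is created under dmemcg_lock using GFP_NOWAIT
 * allocations; if such an allocation fails, the lock is dropped, one pool is
 * preallocated with GFP_KERNEL and the creation is retried. Returns an
 * ERR_PTR() when the region has already been unregistered.
 */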
553 static struct dmem_cgroup_pool_state *
554 get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region)
555 {
556 	struct dmem_cgroup_pool_state *pool, *allocpool = NULL;
557 
558 	/* Fast path: RCU lookup of an already initialized pool. */
559 	rcu_read_lock();
560 	pool = find_cg_pool_locked(cg, region);
561 	if (pool && !READ_ONCE(pool->inited))
562 		pool = NULL;
563 	rcu_read_unlock();
564 
565 	while (!pool) {
566 		spin_lock(&dmemcg_lock);
567 		if (!region->unregistered)
568 			pool = get_cg_pool_locked(cg, region, &allocpool);
569 		else
570 			pool = ERR_PTR(-ENODEV);
571 		spin_unlock(&dmemcg_lock);
572 
573 		if (pool == ERR_PTR(-ENOMEM)) {
574 			pool = NULL;
575 			if (WARN_ON(allocpool))
576 				continue;
577 
578 			allocpool = kzalloc(sizeof(*allocpool), GFP_KERNEL);
579 			if (allocpool) {
580 				pool = NULL;
581 				continue;
582 			}
583 		}
584 	}
585 
586 	kfree(allocpool);
587 	return pool;
588 }
589 
590 /**
591  * dmem_cgroup_uncharge() - Uncharge a pool.
592  * @pool: Pool to uncharge.
593  * @size: Size to uncharge.
594  *
595  * Undoes the effects of dmem_cgroup_try_charge().
596  * Must be called with the pool returned by dmem_cgroup_try_charge()
597  * and the same @size that was charged.
598  */
599 void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
600 {
601 	if (!pool)
602 		return;
603 
604 	page_counter_uncharge(&pool->cnt, size);
605 	css_put(&pool->cs->css);
606 }
607 EXPORT_SYMBOL_GPL(dmem_cgroup_uncharge);
608 
609 /**
610  * dmem_cgroup_try_charge() - Try charging a new allocation to a region.
611  * @region: dmem region to charge
612  * @size: Size (in bytes) to charge.
613  * @ret_pool: On successful allocation, the pool that was charged.
614  * @ret_limit_pool: On a failed allocation, the limiting pool.
615  *
616  * This function charges the given @region for @size bytes.
617  *
618  * If the function succeeds, @ret_pool is set, which must be passed to
619  * dmem_cgroup_uncharge() when undoing the allocation.
620  *
621  * When this function fails with -EAGAIN and @ret_limit_pool is non-null, it
622  * will be set to the pool for which the limit is hit. This can be used for
623  * eviction as an argument to dmem_cgroup_state_evict_valuable(). The reference
624  * must be dropped with dmem_cgroup_pool_state_put().
625  *
626  * Return: 0 on success, -EAGAIN on hitting a limit, or a negative errno on failure.
627  */
628 int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
629 			  struct dmem_cgroup_pool_state **ret_pool,
630 			  struct dmem_cgroup_pool_state **ret_limit_pool)
631 {
632 	struct dmemcg_state *cg;
633 	struct dmem_cgroup_pool_state *pool;
634 	struct page_counter *fail;
635 	int ret;
636 
637 	*ret_pool = NULL;
638 	if (ret_limit_pool)
639 		*ret_limit_pool = NULL;
640 
641 	/*
642 	 * hold on to css, as cgroup can be removed but resource
643 	 * accounting happens on css.
644 	 */
645 	cg = get_current_dmemcs();
646 
647 	pool = get_cg_pool_unlocked(cg, region);
648 	if (IS_ERR(pool)) {
649 		ret = PTR_ERR(pool);
650 		goto err;
651 	}
652 
653 	if (!page_counter_try_charge(&pool->cnt, size, &fail)) {
654 		if (ret_limit_pool) {
655 			*ret_limit_pool = container_of(fail, struct dmem_cgroup_pool_state, cnt);
656 			css_get(&(*ret_limit_pool)->cs->css);
657 		}
658 		ret = -EAGAIN;
659 		goto err;
660 	}
661 
662 	/* On success, reference from get_current_dmemcs is transferred to *ret_pool */
663 	*ret_pool = pool;
664 	return 0;
665 
666 err:
667 	css_put(&cg->css);
668 	return ret;
669 }
670 EXPORT_SYMBOL_GPL(dmem_cgroup_try_charge);
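
/*
 * Illustrative sketch, not part of the original file: charging a buffer
 * allocation and reacting to -EAGAIN by evicting before retrying. The
 * my_driver_shrink_lru() helper and the buf structure are hypothetical.
 *
 *	struct dmem_cgroup_pool_state *pool, *limit_pool;
 *	int ret;
 *
 *	ret = dmem_cgroup_try_charge(region, size, &pool, &limit_pool);
 *	if (ret == -EAGAIN) {
 *		// A limit was hit: try to make room below limit_pool, then retry.
 *		my_driver_shrink_lru(limit_pool, size);
 *		dmem_cgroup_pool_state_put(limit_pool);
 *		ret = dmem_cgroup_try_charge(region, size, &pool, NULL);
 *	}
 *	if (ret)
 *		return ret;
 *
 *	buf->pool = pool;
 *	// ... and when the buffer is freed again:
 *	dmem_cgroup_uncharge(buf->pool, size);
 */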
671 
672 static int dmem_cgroup_region_capacity_show(struct seq_file *sf, void *v)
673 {
674 	struct dmem_cgroup_region *region;
675 
676 	rcu_read_lock();
677 	list_for_each_entry_rcu(region, &dmem_cgroup_regions, region_node) {
678 		seq_puts(sf, region->name);
679 		seq_printf(sf, " %llu\n", region->size);
680 	}
681 	rcu_read_unlock();
682 	return 0;
683 }
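
/*
 * Example dmem.capacity output produced by the function above, assuming two
 * registered regions (the region names shown are hypothetical):
 *
 *	drm/card0/vram0 8589934592
 *	drm/card0/gtt 34359738368
 */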
684 
685 static int dmemcg_parse_limit(char *options, struct dmem_cgroup_region *region,
686 			      u64 *new_limit)
687 {
688 	char *end;
689 
690 	if (!strcmp(options, "max")) {
691 		*new_limit = PAGE_COUNTER_MAX;
692 		return 0;
693 	}
694 
695 	*new_limit = memparse(options, &end);
696 	if (*end != '\0')
697 		return -EINVAL;
698 
699 	return 0;
700 }
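
/*
 * Example limit strings accepted by dmemcg_parse_limit() above:
 *
 *	"max"		- reset the limit to PAGE_COUNTER_MAX (no limit)
 *	"1073741824"	- a plain byte value
 *	"1G"		- a byte value using a memparse() suffix (K, M, G, ...)
 */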
701 
702 static ssize_t dmemcg_limit_write(struct kernfs_open_file *of,
703 				 char *buf, size_t nbytes, loff_t off,
704 				 void (*apply)(struct dmem_cgroup_pool_state *, u64))
705 {
706 	struct dmemcg_state *dmemcs = css_to_dmemcs(of_css(of));
707 	int err = 0;
708 
709 	while (buf && !err) {
710 		struct dmem_cgroup_pool_state *pool = NULL;
711 		char *options, *region_name;
712 		struct dmem_cgroup_region *region;
713 		u64 new_limit;
714 
715 		options = buf;
716 		buf = strchr(buf, '\n');
717 		if (buf)
718 			*buf++ = '\0';
719 
720 		options = strstrip(options);
721 
722 		/* eat empty lines */
723 		if (!options[0])
724 			continue;
725 
726 		region_name = strsep(&options, " \t");
727 		if (!region_name[0])
728 			continue;
729 
730 		rcu_read_lock();
731 		region = dmemcg_get_region_by_name(region_name);
732 		rcu_read_unlock();
733 
734 		if (!region)
735 			return -EINVAL;
736 
737 		err = dmemcg_parse_limit(options, region, &new_limit);
738 		if (err < 0)
739 			goto out_put;
740 
741 		pool = get_cg_pool_unlocked(dmemcs, region);
742 		if (IS_ERR(pool)) {
743 			err = PTR_ERR(pool);
744 			goto out_put;
745 		}
746 
747 		/* And commit */
748 		apply(pool, new_limit);
749 
750 out_put:
751 		kref_put(&region->ref, dmemcg_free_region);
752 	}
753 
754 
755 	return err ?: nbytes;
756 }
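
/*
 * Illustrative sketch, not part of the original file: the write handler above
 * accepts one "<region name> <limit>" pair per line, so limits for several
 * regions can be set with a single write to e.g. dmem.max (region names are
 * hypothetical):
 *
 *	drm/card0/vram0 1G
 *	drm/card0/gtt max
 */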
757 
758 static int dmemcg_limit_show(struct seq_file *sf, void *v,
759 			    u64 (*fn)(struct dmem_cgroup_pool_state *))
760 {
761 	struct dmemcg_state *dmemcs = css_to_dmemcs(seq_css(sf));
762 	struct dmem_cgroup_region *region;
763 
764 	rcu_read_lock();
765 	list_for_each_entry_rcu(region, &dmem_cgroup_regions, region_node) {
766 		struct dmem_cgroup_pool_state *pool = find_cg_pool_locked(dmemcs, region);
767 		u64 val;
768 
769 		seq_puts(sf, region->name);
770 
771 		val = fn(pool);
772 		if (val < PAGE_COUNTER_MAX)
773 			seq_printf(sf, " %lld\n", val);
774 		else
775 			seq_puts(sf, " max\n");
776 	}
777 	rcu_read_unlock();
778 
779 	return 0;
780 }
781 
782 static int dmem_cgroup_region_current_show(struct seq_file *sf, void *v)
783 {
784 	return dmemcg_limit_show(sf, v, get_resource_current);
785 }
786 
787 static int dmem_cgroup_region_min_show(struct seq_file *sf, void *v)
788 {
789 	return dmemcg_limit_show(sf, v, get_resource_min);
790 }
791 
792 static ssize_t dmem_cgroup_region_min_write(struct kernfs_open_file *of,
793 				      char *buf, size_t nbytes, loff_t off)
794 {
795 	return dmemcg_limit_write(of, buf, nbytes, off, set_resource_min);
796 }
797 
798 static int dmem_cgroup_region_low_show(struct seq_file *sf, void *v)
799 {
800 	return dmemcg_limit_show(sf, v, get_resource_low);
801 }
802 
803 static ssize_t dmem_cgroup_region_low_write(struct kernfs_open_file *of,
804 				      char *buf, size_t nbytes, loff_t off)
805 {
806 	return dmemcg_limit_write(of, buf, nbytes, off, set_resource_low);
807 }
808 
809 static int dmem_cgroup_region_max_show(struct seq_file *sf, void *v)
810 {
811 	return dmemcg_limit_show(sf, v, get_resource_max);
812 }
813 
814 static ssize_t dmem_cgroup_region_max_write(struct kernfs_open_file *of,
815 				      char *buf, size_t nbytes, loff_t off)
816 {
817 	return dmemcg_limit_write(of, buf, nbytes, off, set_resource_max);
818 }
819 
820 static struct cftype files[] = {
821 	{
822 		.name = "capacity",
823 		.seq_show = dmem_cgroup_region_capacity_show,
824 		.flags = CFTYPE_ONLY_ON_ROOT,
825 	},
826 	{
827 		.name = "current",
828 		.seq_show = dmem_cgroup_region_current_show,
829 	},
830 	{
831 		.name = "min",
832 		.write = dmem_cgroup_region_min_write,
833 		.seq_show = dmem_cgroup_region_min_show,
834 		.flags = CFTYPE_NOT_ON_ROOT,
835 	},
836 	{
837 		.name = "low",
838 		.write = dmem_cgroup_region_low_write,
839 		.seq_show = dmem_cgroup_region_low_show,
840 		.flags = CFTYPE_NOT_ON_ROOT,
841 	},
842 	{
843 		.name = "max",
844 		.write = dmem_cgroup_region_max_write,
845 		.seq_show = dmem_cgroup_region_max_show,
846 		.flags = CFTYPE_NOT_ON_ROOT,
847 	},
848 	{ } /* Zero entry terminates. */
849 };
850 
851 struct cgroup_subsys dmem_cgrp_subsys = {
852 	.css_alloc	= dmemcs_alloc,
853 	.css_free	= dmemcs_free,
854 	.css_offline	= dmemcs_offline,
855 	.legacy_cftypes	= files,
856 	.dfl_cftypes	= files,
857 };
858