Lines Matching defs:pool
107 static void free_cg_pool(struct dmem_cgroup_pool_state *pool)
109 list_del(&pool->region_node);
110 kfree(pool);
114 set_resource_min(struct dmem_cgroup_pool_state *pool, u64 val)
116 page_counter_set_min(&pool->cnt, val);
120 set_resource_low(struct dmem_cgroup_pool_state *pool, u64 val)
122 page_counter_set_low(&pool->cnt, val);
126 set_resource_max(struct dmem_cgroup_pool_state *pool, u64 val)
128 page_counter_set_max(&pool->cnt, val);
131 static u64 get_resource_low(struct dmem_cgroup_pool_state *pool)
133 return pool ? READ_ONCE(pool->cnt.low) : 0;
136 static u64 get_resource_min(struct dmem_cgroup_pool_state *pool)
138 return pool ? READ_ONCE(pool->cnt.min) : 0;
141 static u64 get_resource_max(struct dmem_cgroup_pool_state *pool)
143 return pool ? READ_ONCE(pool->cnt.max) : PAGE_COUNTER_MAX;
146 static u64 get_resource_current(struct dmem_cgroup_pool_state *pool)
148 return pool ? page_counter_read(&pool->cnt) : 0;
161 struct dmem_cgroup_pool_state *pool;
164 list_for_each_entry_rcu(pool, &dmemcs->pools, css_node)
165 reset_all_resource_limits(pool);
172 struct dmem_cgroup_pool_state *pool, *next;
175 list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) {
177 * The pool is dead and all references are 0,
180 list_del(&pool->css_node);
181 free_cg_pool(pool);
202 struct dmem_cgroup_pool_state *pool;
204 list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock))
205 if (pool->region == region)
206 return pool;
211 static struct dmem_cgroup_pool_state *pool_parent(struct dmem_cgroup_pool_state *pool)
213 if (!pool->cnt.parent)
216 return container_of(pool->cnt.parent, typeof(*pool), cnt);
226 struct dmem_cgroup_pool_state *pool, *found_pool;
236 list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {
237 if (pool->region == limit_pool->region) {
238 found_pool = pool;
256 * @limit_pool: The pool for which we hit limits
257 * @test_pool: The pool to test for eviction
272 struct dmem_cgroup_pool_state *pool = test_pool;
276 /* Can always evict from current pool, despite limits */
284 for (pool = test_pool; pool && limit_pool != pool; pool = pool_parent(pool))
287 if (!pool)
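The matches above come from dmem_cgroup_state_evict_valuable(): it returns true immediately when test_pool is the limiting pool itself, and otherwise walks pool_parent() upward from test_pool toward limit_pool, giving up if limit_pool is never reached. A minimal caller sketch follows, assuming the four-argument signature (limit_pool, test_pool, ignore_low, ret_hit_low) exported by <linux/cgroup_dmem.h>; struct my_victim and my_pick_victim are hypothetical names, not part of the controller.

#include <linux/cgroup_dmem.h>
#include <linux/list.h>

/* Hypothetical LRU entry; only the dmem_cgroup_* call below is the real API. */
struct my_victim {
	struct list_head lru_node;
	struct dmem_cgroup_pool_state *pool;	/* pool this object is charged to */
};

static struct my_victim *my_pick_victim(struct list_head *lru,
					struct dmem_cgroup_pool_state *limit_pool)
{
	struct my_victim *v;
	bool hit_low = false;

	/*
	 * First pass honours low protection (ignore_low == false); if nothing
	 * is evictable but hit_low was set, a caller could retry with
	 * ignore_low == true.
	 */
	list_for_each_entry(v, lru, lru_node)
		if (dmem_cgroup_state_evict_valuable(limit_pool, v->pool,
						     false, &hit_low))
			return v;

	return NULL;
}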
325 struct dmem_cgroup_pool_state *pool, *ppool = NULL;
328 pool = kzalloc(sizeof(*pool), GFP_NOWAIT);
329 if (!pool)
332 pool = *allocpool;
336 pool->region = region;
337 pool->cs = dmemcs;
342 page_counter_init(&pool->cnt,
344 reset_all_resource_limits(pool);
346 list_add_tail_rcu(&pool->css_node, &dmemcs->pools);
347 list_add_tail(&pool->region_node, &region->pools);
350 pool->inited = true;
352 pool->inited = ppool ? ppool->inited : false;
353 return pool;
360 struct dmem_cgroup_pool_state *pool, *ppool, *retpool;
364 * Recursively create pool, we may not initialize yet on
368 pool = find_cg_pool_locked(p, region);
369 if (!pool)
370 pool = alloc_pool_single(p, region, allocpool);
372 if (IS_ERR(pool))
373 return pool;
375 if (p == dmemcs && pool->inited)
376 return pool;
378 if (pool->inited)
382 retpool = pool = find_cg_pool_locked(dmemcs, region);
384 if (pool->inited)
391 pool->cnt.parent = &ppool->cnt;
392 pool->inited = true;
394 pool = ppool;
403 struct dmem_cgroup_pool_state *pool, *next;
405 list_for_each_entry_safe(pool, next, &region->pools, region_node)
406 free_cg_pool(pool);
437 struct dmem_cgroup_pool_state *pool =
438 container_of(entry, typeof(*pool), region_node);
440 list_del_rcu(&pool->css_node);
514 * @pool: &dmem_cgroup_pool_state
516 * Called to drop a reference to the limiting pool returned by
519 void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
521 if (pool)
522 css_put(&pool->cs->css);
529 struct dmem_cgroup_pool_state *pool, *allocpool = NULL;
533 pool = find_cg_pool_locked(cg, region);
534 if (pool && !READ_ONCE(pool->inited))
535 pool = NULL;
538 while (!pool) {
541 pool = get_cg_pool_locked(cg, region, &allocpool);
543 pool = ERR_PTR(-ENODEV);
546 if (pool == ERR_PTR(-ENOMEM)) {
547 pool = NULL;
553 pool = NULL;
560 return pool;
564 * dmem_cgroup_uncharge() - Uncharge a pool.
565 * @pool: Pool to uncharge.
569 * Must be called with the returned pool as argument,
572 void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
574 if (!pool)
577 page_counter_uncharge(&pool->cnt, size);
578 css_put(&pool->cs->css);
586 * @ret_pool: On successful allocation, the pool that is charged.
587 * @ret_limit_pool: On a failed allocation, the limiting pool.
595 * will be set to the pool for which the limit is hit. This can be used for
606 struct dmem_cgroup_pool_state *pool;
620 pool = get_cg_pool_unlocked(cg, region);
621 if (IS_ERR(pool)) {
622 ret = PTR_ERR(pool);
626 if (!page_counter_try_charge(&pool->cnt, size, &fail)) {
636 *ret_pool = pool;
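The kerneldoc fragments above spell out the charge/uncharge contract: dmem_cgroup_try_charge() charges a size against a region for the current cgroup and hands back the charged pool, on a failed allocation it may instead hand back the limiting pool, and dmem_cgroup_uncharge() plus dmem_cgroup_pool_state_put() release the charge and the limiting-pool reference. A minimal caller sketch follows, assuming those upstream signatures from <linux/cgroup_dmem.h>; my_region_alloc and its surrounding error handling are illustrative only.

#include <linux/cgroup_dmem.h>
#include <linux/types.h>

/*
 * Hypothetical driver allocation path; only the dmem_cgroup_* calls are
 * the real controller API.
 */
static int my_region_alloc(struct dmem_cgroup_region *region, u64 size)
{
	struct dmem_cgroup_pool_state *pool, *limit_pool = NULL;
	int ret;

	/* Charge @size bytes of this region to the current task's cgroup. */
	ret = dmem_cgroup_try_charge(region, size, &pool, &limit_pool);
	if (ret) {
		/*
		 * When a limit is hit, the limiting pool is returned with a
		 * reference held; it can guide victim selection for eviction
		 * but must be dropped with dmem_cgroup_pool_state_put().
		 */
		if (limit_pool)
			dmem_cgroup_pool_state_put(limit_pool);
		return ret;
	}

	/* ... perform the actual allocation, keep @pool with the object ... */

	/* When the allocation is released (or fails), undo the charge. */
	dmem_cgroup_uncharge(pool, size);
	return 0;
}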
683 struct dmem_cgroup_pool_state *pool = NULL;
714 pool = get_cg_pool_unlocked(dmemcs, region);
715 if (IS_ERR(pool)) {
716 err = PTR_ERR(pool);
721 apply(pool, new_limit);
739 struct dmem_cgroup_pool_state *pool = find_cg_pool_locked(dmemcs, region);
744 val = fn(pool);