Lines Matching +full:zero +full:- +full:initialised
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
23 * period in time. When the first element is added, time zero for the data
24 * structure is initialised to the current time.
29 * inserted at the head of the current most-recently-used list.
88 * likely result in a loop in one of the lists. That's a sure-fire recipe for
98 unsigned int lru_grp; /* Group containing time zero. */
115 * list again and again until either a) the lists are all empty, or b) time zero
123 * zero if there's no need to schedule more work because the lists are empty.
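For orientation, the fields referenced by the matches above and below (store, lists, reap_list, lock, grp_count, grp_time, lru_grp, time_zero, free_func, work, queued, data) suggest a control structure roughly like the following sketch. It is reconstructed from usage in this file rather than copied from the header, so treat names and comments as approximate.

	/* Sketch of struct xfs_mru_cache, inferred from the fields used in this file. */
	struct xfs_mru_cache {
		struct radix_tree_root	store;		/* Key -> element lookup.            */
		struct list_head	*lists;		/* Array of grp_count MRU lists.     */
		struct list_head	reap_list;	/* Elements overdue for reaping.     */
		spinlock_t		lock;		/* Protects all of the above.        */
		unsigned int		grp_count;	/* Number of discrete groups.        */
		unsigned int		grp_time;	/* Time (jiffies) spanned per group. */
		unsigned int		lru_grp;	/* Group containing time zero.       */
		unsigned long		time_zero;	/* Time the first element was added. */
		xfs_mru_cache_free_func_t free_func;	/* Per-cache element free callback.  */
		struct delayed_work	work;		/* Delayed work for reaping.         */
		unsigned int		queued;		/* Non-zero if reap work is queued.  */
		void			*data;		/* Opaque caller context.            */
	};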
135 if (!mru->time_zero) in _xfs_mru_cache_migrate()
138 /* While time zero is older than the time spanned by all the lists. */ in _xfs_mru_cache_migrate()
139 while (mru->time_zero <= now - mru->grp_count * mru->grp_time) { in _xfs_mru_cache_migrate()
145 lru_list = mru->lists + mru->lru_grp; in _xfs_mru_cache_migrate()
147 list_splice_init(lru_list, mru->reap_list.prev); in _xfs_mru_cache_migrate()
151 * become the new MRU list; advance time zero accordingly. in _xfs_mru_cache_migrate()
153 mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count; in _xfs_mru_cache_migrate()
154 mru->time_zero += mru->grp_time; in _xfs_mru_cache_migrate()
160 if (++migrated == mru->grp_count) { in _xfs_mru_cache_migrate()
161 mru->lru_grp = 0; in _xfs_mru_cache_migrate()
162 mru->time_zero = 0; in _xfs_mru_cache_migrate()
167 /* Find the first non-empty list from the LRU end. */ in _xfs_mru_cache_migrate()
168 for (grp = 0; grp < mru->grp_count; grp++) { in _xfs_mru_cache_migrate()
171 lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count); in _xfs_mru_cache_migrate()
173 return mru->time_zero + in _xfs_mru_cache_migrate()
174 (mru->grp_count + grp) * mru->grp_time; in _xfs_mru_cache_migrate()
178 mru->lru_grp = 0; in _xfs_mru_cache_migrate()
179 mru->time_zero = 0; in _xfs_mru_cache_migrate()
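The migrate loop above is easier to follow with concrete numbers. Below is a minimal userspace sketch of its timing arithmetic, with all values invented for illustration: grp_count = 3, grp_time = 1000 jiffies, time_zero = 5000, so the lists span [5000, 8000).

	static unsigned long example_next_reap(unsigned long now)
	{
		unsigned long	time_zero = 5000, grp_time = 1000;
		unsigned int	grp_count = 3;

		/* At now = 8200 the loop body runs once: lists[lru_grp] is spliced
		 * onto the reap list, lru_grp advances by one and time zero moves
		 * forward by one group period. */
		while (time_zero <= now - grp_count * grp_time)
			time_zero += grp_time;		/* 5000 -> 6000, then the loop exits */

		/* With the first non-empty list right at the new LRU end (grp = 0),
		 * the next reap is due at 6000 + 3 * 1000 = 9000 jiffies. */
		return time_zero + grp_count * grp_time;
	}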
186 * up-to-date, otherwise the new element could be given a shorter lifetime in
198 * If the data store is empty, initialise time zero, leave grp set to in _xfs_mru_cache_list_insert()
199 * zero and start the work queue timer if necessary. Otherwise, set grp in _xfs_mru_cache_list_insert()
200 * to the number of group times that have elapsed since time zero. in _xfs_mru_cache_list_insert()
203 mru->time_zero = now; in _xfs_mru_cache_list_insert()
204 if (!mru->queued) { in _xfs_mru_cache_list_insert()
205 mru->queued = 1; in _xfs_mru_cache_list_insert()
206 queue_delayed_work(xfs_mru_reap_wq, &mru->work, in _xfs_mru_cache_list_insert()
207 mru->grp_count * mru->grp_time); in _xfs_mru_cache_list_insert()
210 grp = (now - mru->time_zero) / mru->grp_time; in _xfs_mru_cache_list_insert()
211 grp = (mru->lru_grp + grp) % mru->grp_count; in _xfs_mru_cache_list_insert()
215 list_add_tail(&elem->list_node, mru->lists + grp); in _xfs_mru_cache_list_insert()
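The two-step group selection at lines 210-211 above can likewise be illustrated with invented numbers; this userspace sketch mirrors the arithmetic only and is not part of the file.

	static unsigned int example_pick_group(unsigned long now)
	{
		unsigned long	time_zero = 5000, grp_time = 1000, grp;
		unsigned int	grp_count = 3, lru_grp = 1;

		/* Whole group periods elapsed since time zero:
		 * now = 7400 gives (7400 - 5000) / 1000 = 2. */
		grp = (now - time_zero) / grp_time;

		/* Offset from the current LRU slot, wrapping around the array:
		 * (1 + 2) % 3 = 0, so the element joins lists[0]. */
		return (lru_grp + grp) % grp_count;
	}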
224 * We get called holding the mru->lock, which we drop and then reacquire.
230 __releases(mru->lock) __acquires(mru->lock) in _xfs_mru_cache_clear_reap_list()
235 list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) { in _xfs_mru_cache_clear_reap_list()
238 radix_tree_delete(&mru->store, elem->key); in _xfs_mru_cache_clear_reap_list()
244 list_move(&elem->list_node, &tmp); in _xfs_mru_cache_clear_reap_list()
246 spin_unlock(&mru->lock); in _xfs_mru_cache_clear_reap_list()
249 list_del_init(&elem->list_node); in _xfs_mru_cache_clear_reap_list()
250 mru->free_func(mru->data, elem); in _xfs_mru_cache_clear_reap_list()
253 spin_lock(&mru->lock); in _xfs_mru_cache_clear_reap_list()
271 ASSERT(mru && mru->lists); in _xfs_mru_cache_reap()
272 if (!mru || !mru->lists) in _xfs_mru_cache_reap()
275 spin_lock(&mru->lock); in _xfs_mru_cache_reap()
279 mru->queued = next; in _xfs_mru_cache_reap()
280 if (mru->queued > 0) { in _xfs_mru_cache_reap()
285 next -= now; in _xfs_mru_cache_reap()
286 queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); in _xfs_mru_cache_reap()
289 spin_unlock(&mru->lock); in _xfs_mru_cache_reap()
299 return -ENOMEM; in xfs_mru_cache_init()
331 return -EINVAL; in xfs_mru_cache_create()
334 return -EINVAL; in xfs_mru_cache_create()
338 return -ENOMEM; in xfs_mru_cache_create()
341 mru->grp_count = grp_count + 1; in xfs_mru_cache_create()
342 mru->lists = kzalloc(mru->grp_count * sizeof(*mru->lists), in xfs_mru_cache_create()
344 if (!mru->lists) { in xfs_mru_cache_create()
346 return -ENOMEM; in xfs_mru_cache_create()
349 for (grp = 0; grp < mru->grp_count; grp++) in xfs_mru_cache_create()
350 INIT_LIST_HEAD(mru->lists + grp); in xfs_mru_cache_create()
356 INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); in xfs_mru_cache_create()
357 INIT_LIST_HEAD(&mru->reap_list); in xfs_mru_cache_create()
358 spin_lock_init(&mru->lock); in xfs_mru_cache_create()
359 INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); in xfs_mru_cache_create()
361 mru->grp_time = grp_time; in xfs_mru_cache_create()
362 mru->free_func = free_func; in xfs_mru_cache_create()
363 mru->data = data; in xfs_mru_cache_create()
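Taken together with the insert, lookup and destroy routines matched below, a caller-side lifecycle might look roughly like the following sketch. The struct my_item, my_free() and my_example() names are invented, and the xfs_mru_cache_create() argument order (cache, data, lifetime in ms, group count, free function) is assumed rather than shown in these matches.

	struct my_item {
		struct xfs_mru_cache_elem	mru;		/* must be embedded */
		int				payload;
	};

	static void
	my_free(
		void				*data,
		struct xfs_mru_cache_elem	*elem)
	{
		kfree(container_of(elem, struct my_item, mru));
	}

	static int
	my_example(void)
	{
		struct xfs_mru_cache	*cache;
		struct my_item		*item;
		int			error;

		/* Elements live ~10000 ms, tracked across (2 + 1) internal lists. */
		error = xfs_mru_cache_create(&cache, NULL, 10000, 2, my_free);
		if (error)
			return error;

		item = kzalloc(sizeof(*item), GFP_KERNEL);
		if (!item) {
			error = -ENOMEM;
			goto out_destroy;
		}

		/* On failure the element is freed via my_free(), so don't touch it. */
		error = xfs_mru_cache_insert(cache, 42, &item->mru);

	out_destroy:
		/* Destroy reaps anything still cached, calling my_free() on it. */
		xfs_mru_cache_destroy(cache);
		return error;
	}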
378 if (!mru || !mru->lists) in xfs_mru_cache_flush()
381 spin_lock(&mru->lock); in xfs_mru_cache_flush()
382 if (mru->queued) { in xfs_mru_cache_flush()
383 spin_unlock(&mru->lock); in xfs_mru_cache_flush()
384 cancel_delayed_work_sync(&mru->work); in xfs_mru_cache_flush()
385 spin_lock(&mru->lock); in xfs_mru_cache_flush()
388 _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time); in xfs_mru_cache_flush()
391 spin_unlock(&mru->lock); in xfs_mru_cache_flush()
398 if (!mru || !mru->lists) in xfs_mru_cache_destroy()
403 kfree(mru->lists); in xfs_mru_cache_destroy()
412 * The passed-in elem is freed through the per-cache free_func on failure.
420 int error = -EINVAL; in xfs_mru_cache_insert()
422 error = -ENOMEM; in xfs_mru_cache_insert()
426 INIT_LIST_HEAD(&elem->list_node); in xfs_mru_cache_insert()
427 elem->key = key; in xfs_mru_cache_insert()
429 spin_lock(&mru->lock); in xfs_mru_cache_insert()
430 error = radix_tree_insert(&mru->store, key, elem); in xfs_mru_cache_insert()
434 spin_unlock(&mru->lock); in xfs_mru_cache_insert()
441 mru->free_func(mru->data, elem); in xfs_mru_cache_insert()
458 ASSERT(mru && mru->lists); in xfs_mru_cache_remove()
459 if (!mru || !mru->lists) in xfs_mru_cache_remove()
462 spin_lock(&mru->lock); in xfs_mru_cache_remove()
463 elem = radix_tree_delete(&mru->store, key); in xfs_mru_cache_remove()
465 list_del(&elem->list_node); in xfs_mru_cache_remove()
466 spin_unlock(&mru->lock); in xfs_mru_cache_remove()
484 mru->free_func(mru->data, elem); in xfs_mru_cache_delete()
498 * of the returned data structure, the extra per-element memory isn't warranted.
514 ASSERT(mru && mru->lists); in xfs_mru_cache_lookup()
515 if (!mru || !mru->lists) in xfs_mru_cache_lookup()
518 spin_lock(&mru->lock); in xfs_mru_cache_lookup()
519 elem = radix_tree_lookup(&mru->store, key); in xfs_mru_cache_lookup()
521 list_del(&elem->list_node); in xfs_mru_cache_lookup()
525 spin_unlock(&mru->lock); in xfs_mru_cache_lookup()
538 __releases(mru->lock) in xfs_mru_cache_done()
540 spin_unlock(&mru->lock); in xfs_mru_cache_done()
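One detail worth calling out from the lookup and done matches above: a successful xfs_mru_cache_lookup() returns with mru->lock still held (hence the __releases annotation on xfs_mru_cache_done()), so every hit must be paired with a call to xfs_mru_cache_done(). A hypothetical caller pattern, reusing struct my_item from the earlier sketch (use_item() is an invented helper):

	struct xfs_mru_cache_elem	*elem;

	elem = xfs_mru_cache_lookup(cache, 42);
	if (elem) {
		struct my_item	*item = container_of(elem, struct my_item, mru);

		use_item(item);			/* mru->lock is still held here */
		xfs_mru_cache_done(cache);	/* drops mru->lock */
	}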