/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
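
/*
 * A rough sketch of a fully populated (non-HEADLESS) z3fold page, as implied
 * by the layout logic below (the header occupies the leading chunk(s), the
 * last buddy sits flush against the end of the page, and free gaps may
 * remain between buddies):
 *
 *	+--------+-------------+--------------+------------+
 *	| header | first buddy | middle buddy | last buddy |
 *	+--------+-------------+--------------+------------+
 *	chunk 0   ZHDR_CHUNKS   start_middle    ends at TOTAL_CHUNKS
 *
 * Pages holding a single allocation close to PAGE_SIZE are stored
 * "headless", i.e. without a z3fold_header.
 */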

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunk of
 *			each z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the chunk at which the middle buddy starts
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
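
/*
 * For illustration only (an assumption, not something this file depends on):
 * on a typical 64-bit build with 4 KiB pages (PAGE_SHIFT == 12) and without
 * spinlock debugging, the above works out to
 *
 *	CHUNK_SHIFT       = 12 - 6 = 6
 *	CHUNK_SIZE        = 64 bytes
 *	TOTAL_CHUNKS      = 4096 >> 6 = 64
 *	ZHDR_SIZE_ALIGNED = 64 (the header fits in one chunk)
 *	ZHDR_CHUNKS       = 1
 *	NCHUNKS           = (4096 - 64) >> 6 = 63
 */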

#define BUDDY_MASK	(0x3)

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock:	protects all pool fields and the first|last_chunks fields of
 *		any z3fold page in the pool
 * @unbuddied:	array of lists tracking z3fold pages that contain at most two
 *		buddies; which list a z3fold page is added to depends on the
 *		size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head lru;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
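
/*
 * E.g. (assuming CHUNK_SIZE == 64, i.e. 4 KiB pages): size_to_chunks(100)
 * returns (100 + 63) >> 6 == 2, so a 100-byte object occupies two chunks.
 */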

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr;
	struct page *page;

	zhdr = container_of(ref, struct z3fold_header, refcount);
	page = virt_to_page(zhdr);

	if (!list_empty(&zhdr->buddy))
		list_del(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	free_z3fold_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}
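
/*
 * E.g. (hypothetical address, assuming first_num == 0): for a header at
 * 0xffff888012340000, encode_handle(zhdr, LAST) yields 0xffff888012340003,
 * i.e. the page address with the buddy number in the two low bits covered
 * by BUDDY_MASK.
 */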

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle(),
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
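
/*
 * Worked example (illustrative values): with first_num == 3, FIRST (1)
 * encodes to (1 + 3) & 0x3 == 0 in the handle's low bits and decodes back
 * as (0 - 3) & 0x3 == 1 == FIRST; the first_num offset cancels out under
 * the mask.
 */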

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
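
/*
 * E.g. (assuming TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1): a page holding only
 * a middle buddy of 5 chunks at start_middle == 20 has nfree_before == 19
 * and nfree_after == 64 - 25 == 39, so num_free_chunks() reports 39 -- the
 * largest contiguous region a new buddy could use.
 */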

/*****************
 * API Functions
 *****************/
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->lru);
	atomic64_set(&pool->pages_nr, 0);
	pool->ops = ops;
	return pool;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kfree(pool);
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with the page lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
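		/*
		 * The middle buddy has just become the first buddy; bumping
		 * first_num shifts handle decoding by one, so an outstanding
		 * handle that encoded MIDDLE now decodes as FIRST (see
		 * handle_to_buddy()).
		 */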
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the allocation is too large to fit
 * in any page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		chunks = size_to_chunks(size);

		/* First, try to find an unbuddied z3fold page. */
		zhdr = NULL;
		for_each_unbuddied_list(i, chunks) {
			spin_lock(&pool->lock);
			zhdr = list_first_entry_or_null(&pool->unbuddied[i],
						struct z3fold_header, buddy);
			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				continue;
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				z3fold_page_unlock(zhdr);
				spin_lock(&pool->lock);
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				spin_unlock(&pool->lock);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				continue;
			}
			goto found;
		}
		bud = FIRST;
	}

	/* Couldn't find unbuddied z3fold page, create new one */
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		spin_lock(&pool->lock);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	spin_lock(&pool->lock);
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

headless:
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the chunk count of the respective buddy to 0.  The page is
 * actually freed once all buddies are evicted (see z3fold_reclaim_page()
 * below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	int freechunks;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
	} else {
		if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
		    zhdr->last_chunks != 0) {
			z3fold_compact_page(zhdr);
			/* Add to the unbuddied list */
			spin_lock(&pool->lock);
			if (!list_empty(&zhdr->buddy))
				list_del(&zhdr->buddy);
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
			spin_unlock(&pool->lock);
		}
		z3fold_page_unlock(zhdr);
		spin_lock(&pool->lock);
		if (kref_put(&zhdr->refcount, release_z3fold_page))
			atomic64_dec(&pool->pages_nr);
		spin_unlock(&pool->lock);
	}
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * An illustrative sketch of such an eviction handler follows the function
 * body below.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0, freechunks;
	struct z3fold_header *zhdr;
	struct page *page;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		page = list_last_entry(&pool->lru, struct page, lru);
		list_del_init(&page->lru);

		zhdr = page_address(page);
		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			if (!list_empty(&zhdr->buddy))
				list_del_init(&zhdr->buddy);
			kref_get(&zhdr->refcount);
			spin_unlock(&pool->lock);
			z3fold_page_lock(zhdr);
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
			spin_unlock(&pool->lock);
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				return 0;
			} else {
				spin_lock(&pool->lock);
			}
		} else {
			z3fold_page_lock(zhdr);
			if ((zhdr->first_chunks || zhdr->last_chunks ||
			     zhdr->middle_chunks) &&
			    !(zhdr->first_chunks && zhdr->last_chunks &&
			      zhdr->middle_chunks)) {
				z3fold_compact_page(zhdr);
				/* add to unbuddied list */
				spin_lock(&pool->lock);
				freechunks = num_free_chunks(zhdr);
				list_add(&zhdr->buddy,
					 &pool->unbuddied[freechunks]);
				spin_unlock(&pool->lock);
			}
			z3fold_page_unlock(zhdr);
			spin_lock(&pool->lock);
			if (kref_put(&zhdr->refcount, release_z3fold_page)) {
				spin_unlock(&pool->lock);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
		}

		/*
		 * Add to the beginning of LRU.
		 * Pool lock has to be kept here to ensure the page has
		 * not already been released
		 */
		list_add(&page->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
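
/*
 * For illustration only -- not part of this file: a minimal sketch of the
 * user-defined eviction handler described above. The helper names
 * (my_lookup_entry(), my_write_back()) are hypothetical; a real user such
 * as zswap does considerably more bookkeeping.
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		struct my_entry *e = my_lookup_entry(handle);
 *
 *		if (!e)
 *			return -ENOENT;  // can't evict; page goes back on LRU
 *		if (my_write_back(e))    // push the data to backing store
 *			return -EAGAIN;  // failed; z3fold tries the next page
 *		z3fold_free(pool, handle);  // required on success (see above)
 *		return 0;
 *	}
 */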

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 *****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
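
/*
 * Since z3fold exports no API of its own, it is reached through zpool. A
 * rough usage sketch of that path, the way a user such as zswap drives it
 * (signatures as best understood for this kernel version; error handling
 * elided, "my_pool" and my_zpool_ops are placeholders):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "my_pool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (!zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, compressed_data, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */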

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");