// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)
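
/*
 * Illustrative arithmetic, assuming PAGE_SIZE == 4096 (PAGE_SHIFT == 12)
 * and a z3fold header that rounds up to a single chunk:
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64
 *	ZHDR_CHUNKS  = 1, hence NCHUNKS = 64 - 1 = 63
 */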

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)
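
/*
 * Note on the layout above: z3fold_buddy_slots objects come from a slab
 * cache created with SLOTS_ALIGN (0x40) alignment, so a handle -- which
 * is the address of one of the slot[] entries -- can be converted back
 * to its containing slots structure by masking off the low six bits,
 * as handle_to_slots() below does.  The low HANDLE_FLAG_MASK bits of
 * the pool back link are likewise free to carry handle flags.
 */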

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of handles owned by other pages' slots
 *			that point into this page
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain
 *		fewer than three buddies; the list each z3fold page is added
 *		to depends on the size of its free region.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
	PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
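
/*
 * Worked example, assuming 64-byte chunks (CHUNK_SHIFT == 6):
 * size_to_chunks(100) = (100 + 63) >> 6 = 2, i.e. a 100-byte object
 * occupies two chunks, and an exactly-fitting 128-byte object also
 * maps to (128 + 63) >> 6 = 2 chunks.
 */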

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
							     gfp);

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Return the z3fold header, locked, unless the page is headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked) {
				struct page *page = virt_to_page(zhdr);

				if (!test_bit(PAGE_MIGRATED, &page->private))
					break;
				z3fold_page_unlock(zhdr);
			}
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	clear_bit(PAGE_MIGRATED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the buddy's index within the slots array */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}
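
/*
 * Handle layout sketch, assuming 64-byte chunks: a non-headless handle
 * is the address of a slot[] entry, and the value stored in that slot
 * is the z3fold header address plus the buddy index in the low
 * BUDDY_MASK bits.  For a LAST buddy of, say, 5 chunks, the stored
 * value additionally carries (5 << BUDDY_SHIFT) in its sub-page bits,
 * which handle_to_chunks() below reads back out.
 */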

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
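
/*
 * Worked example, assuming TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1:
 * with first_chunks == 10, middle_chunks == 8 at start_middle == 20
 * and last_chunks == 0, nfree_before is 0 (the first buddy is in use)
 * and nfree_after is 64 - (20 + 8) = 36, so num_free_chunks() returns 36.
 */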

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}
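
/*
 * For instance, on a page whose middle buddy starts at chunk 20 with
 * the first buddy free, a request for 5 chunks fits in front of the
 * middle buddy (5 <= 20 - ZHDR_CHUNKS, assuming ZHDR_CHUNKS == 1), so
 * FIRST is returned; were the gap too small, a free LAST buddy would
 * be picked instead.
 */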

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;

}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
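
/*
 * Example of the BIG_CHUNK_GAP heuristic above, assuming ZHDR_CHUNKS == 1:
 * with first_chunks == 10 and start_middle == 20, the gap in front of
 * the middle buddy is 20 - (10 + 1) = 9 chunks, which is >= BIG_CHUNK_GAP,
 * so the middle buddy is moved down to start at chunk 11; a gap of only
 * 2 chunks would be left in place.
 */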

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots) {
		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
		if (!zhdr->slots)
			goto out_fail;
	}
	return zhdr;

out_fail:
	if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, zhdr);
		z3fold_page_unlock(zhdr);
	}
	return NULL;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

static const struct movable_operations z3fold_mops;

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the request cannot possibly fit in
 * a single page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	} else {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
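
/*
 * Note on the headless cutoff above, assuming 4K pages and a one-chunk
 * header: PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE = 4096 - 64 - 64 =
 * 3968 bytes, so anything larger gets a page to itself, with the handle
 * encoded as the page address plus the PAGE_HEADLESS bit.
 */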

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides has been
 * claimed by another context, as indicated by the PAGE_CLAIMED flag being
 * set, this function only sets the first|middle|last_chunks to 0.  The page
 * is actually released once the claiming context drops its reference.
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		clear_bit(PAGE_CLAIMED, &page->private);
		put_z3fold_header(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		clear_bit(PAGE_CLAIMED, &page->private);
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	set_bit(PAGE_MIGRATED, &page->private);
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	__SetPageMovable(newpage, &z3fold_mops);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
	page->private = 0;
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;
	if (list_empty(&zhdr->buddy))
		add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct movable_operations z3fold_mops = {
	.isolate_page = z3fold_page_isolate,
	.migrate_page = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static void *z3fold_zpool_create(const char *name, gfp_t gfp)
{
	return z3fold_create_pool(name, gfp);
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
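
/*
 * Usage sketch: z3fold is consumed through the zpool API, e.g. by
 * zswap.  Something along these lines (exact zpool function signatures
 * vary between kernel versions):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL);
 *	unsigned long handle;
 *
 *	if (zp && !zpool_malloc(zp, 1000, GFP_KERNEL, &handle)) {
 *		void *obj = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
 *
 *		memcpy(obj, src, 1000);	// src: caller-provided buffer
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	if (zp)
 *		zpool_destroy_pool(zp);
 */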

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * there is remaining space for its buddies.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");