/*
 * zbud.c
 *
 * Copyright (C) 2013, Seth Jennings, IBM
 *
 * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
 *
 * zbud is a special purpose allocator for storing compressed pages.  Contrary
 * to what its name may suggest, zbud is not a buddy allocator, but rather an
 * allocator that "buddies" two compressed pages together in a single memory
 * page.
 *
 * While this design limits storage density, it has simple and deterministic
 * reclaim properties that make it preferable to a higher density approach when
 * reclaim will be used.
 *
 * zbud works by storing compressed pages, or "zpages", together in pairs in a
 * single memory page called a "zbud page".  The first buddy is "left
 * justified" at the beginning of the zbud page, and the last buddy is "right
 * justified" at the end of the zbud page.  The benefit is that if either
 * buddy is freed, the freed buddy space, coalesced with whatever slack space
 * existed between the buddies, results in the largest possible free region
 * within the zbud page.
 *
 * zbud also provides an attractive lower bound on density.  The ratio of
 * zpages to zbud pages cannot be less than 1.  This ensures that zbud can
 * never "do harm" by using more pages to store zpages than the uncompressed
 * zpages would have used on their own.
 *
 * zbud pages are divided into "chunks".  The size of the chunks is fixed at
 * compile time and determined by NCHUNKS_ORDER below.  Dividing zbud pages
 * into chunks allows organizing unbuddied zbud pages into a manageable number
 * of unbuddied lists according to the number of free chunks available in the
 * zbud page.
 *
 * The zbud API differs from that of conventional allocators in that the
 * allocation function, zbud_alloc(), returns an opaque handle to the user,
 * not a dereferenceable pointer.  The user must map the handle using
 * zbud_map() in order to get a usable pointer by which to access the
 * allocation data and unmap the handle with zbud_unmap() when operations
 * on the allocation data are complete.
 */
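
/*
 * A minimal usage sketch of the API described above (illustrative only, not
 * part of this file's code paths; error handling is abbreviated and the
 * compressed source buffer "src"/"src_len" is hypothetical):
 *
 *	struct zbud_pool *pool;
 *	unsigned long handle;
 *	void *dst;
 *	int ret;
 *
 *	pool = zbud_create_pool(GFP_KERNEL, NULL);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	ret = zbud_alloc(pool, src_len, GFP_KERNEL, &handle);
 *	if (ret)
 *		goto out;
 *
 *	dst = zbud_map(pool, handle);
 *	memcpy(dst, src, src_len);
 *	zbud_unmap(pool, handle);
 *
 *	...
 *
 *	zbud_free(pool, handle);
 * out:
 *	zbud_destroy_pool(pool);
 */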

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zbud.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool.  An NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64.  Since one
 * chunk of each allocated page is occupied by the zbud header, NCHUNKS works
 * out to 63, which is the maximum number of free chunks in a zbud page;
 * there are also 63 freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
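
/*
 * For example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): CHUNK_SHIFT is
 * 12 - 6 = 6, so CHUNK_SIZE is 64 bytes, the zbud header is padded out to one
 * 64-byte chunk (ZHDR_SIZE_ALIGNED), and NCHUNKS is (4096 - 64) >> 6 = 63.
 */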

/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock:	protects all pool fields and first|last_chunks fields of any
 *		zbud page in the pool
 * @unbuddied:	array of lists tracking zbud pages that only contain one buddy;
 *		the list a zbud page is added to depends on the size of
 *		its free region.
 * @buddied:	list tracking the zbud pages that contain two buddies;
 *		these zbud pages are full
 * @lru:	list tracking the zbud pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of zbud pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular zbud pool.
 */
struct zbud_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head buddied;
	struct list_head lru;
	u64 pages_nr;
	struct zbud_ops *ops;
};

/*
 * struct zbud_header - zbud page metadata occupying the first chunk of each
 *			zbud page.
 * @buddy:	links the zbud page into the unbuddied/buddied lists in the pool
 * @lru:	links the zbud page into the lru list in the pool
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @under_reclaim:	true while the zbud page is being reclaimed; zbud_free()
 *			then leaves freeing of the page to the reclaim path
 */
struct zbud_header {
	struct list_head buddy;
	struct list_head lru;
	unsigned int first_chunks;
	unsigned int last_chunks;
	bool under_reclaim;
};
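
/*
 * Resulting layout of a zbud page (widths not to scale; the header occupies
 * exactly one chunk, ZHDR_SIZE_ALIGNED, and either buddy may be absent):
 *
 *	+-------------+---------------+-------------+--------------+
 *	| zbud_header | first buddy   | free chunks | last buddy   |
 *	|  (1 chunk)  | left justified|             | right justif.|
 *	+-------------+---------------+-------------+--------------+
 *	page start                                          page end
 */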

/*****************
 * zpool
 ****************/

#ifdef CONFIG_ZPOOL

static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
{
	return zpool_evict(pool, handle);
}

static struct zbud_ops zbud_zpool_ops = {
	.evict =	zbud_zpool_evict
};

static void *zbud_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
}

static void zbud_zpool_destroy(void *pool)
{
	zbud_destroy_pool(pool);
}

static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zbud_alloc(pool, size, gfp, handle);
}

static void zbud_zpool_free(void *pool, unsigned long handle)
{
	zbud_free(pool, handle);
}

static int zbud_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = zbud_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *zbud_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return zbud_map(pool, handle);
}

static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	zbud_unmap(pool, handle);
}

static u64 zbud_zpool_total_size(void *pool)
{
	return zbud_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver zbud_zpool_driver = {
	.type =		"zbud",
	.owner =	THIS_MODULE,
	.create =	zbud_zpool_create,
	.destroy =	zbud_zpool_destroy,
	.malloc =	zbud_zpool_malloc,
	.free =		zbud_zpool_free,
	.shrink =	zbud_zpool_shrink,
	.map =		zbud_zpool_map,
	.unmap =	zbud_zpool_unmap,
	.total_size =	zbud_zpool_total_size,
};

MODULE_ALIAS("zpool-zbud");
#endif /* CONFIG_ZPOOL */

/*****************
 * Helpers
*****************/
/* Just to make the code easier to read */
enum buddy {
	FIRST,
	LAST
};

/* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
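
/*
 * For example, with 64-byte chunks, size_to_chunks(100) rounds the request up
 * to 2 chunks and size_to_chunks(64) is exactly 1 chunk.
 */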

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the zbud header of a newly allocated zbud page */
static struct zbud_header *init_zbud_page(struct page *page)
{
	struct zbud_header *zhdr = page_address(page);

	zhdr->first_chunks = 0;
	zhdr->last_chunks = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_LIST_HEAD(&zhdr->lru);
	zhdr->under_reclaim = false;
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a zbud page
 * Pool lock should be held as this function accesses first|last_chunks
 */
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	/*
	 * For now, the encoded handle is actually just the pointer to the data
	 * but this might not always be the case.  A little information hiding.
	 * Add CHUNK_SIZE to the handle if it is the first allocation to jump
	 * over the zbud header in the first chunk.
	 */
	handle = (unsigned long)zhdr;
	if (bud == FIRST)
		/* skip over zbud header */
		handle += ZHDR_SIZE_ALIGNED;
	else /* bud == LAST */
		handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
	return handle;
}
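
/*
 * Illustration (hypothetical addresses, 4 KiB pages, 64-byte chunks): for a
 * zbud page whose header sits at 0xffff880012345000, the FIRST handle is
 * 0xffff880012345040 (just past the header chunk), and with last_chunks == 3
 * the LAST handle is 0xffff880012345000 + 4096 - 3 * 64 = 0xffff880012345f40.
 */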

/* Returns the zbud page where a given handle is stored */
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
{
	return (struct zbud_header *)(handle & PAGE_MASK);
}

/* Returns the number of free chunks in a zbud page */
static int num_free_chunks(struct zbud_header *zhdr)
{
	/*
	 * Rather than branch for different situations, just use the fact that
	 * free buddies have a length of zero to simplify everything.
	 */
	return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
}
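
/*
 * E.g. (with NCHUNKS == 63), a page holding a 10-chunk first buddy and a
 * 20-chunk last buddy has 63 - 10 - 20 = 33 free chunks; a completely empty
 * page reports all 63 chunks free.
 */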

/*****************
 * API Functions
*****************/
/**
 * zbud_create_pool() - create a new zbud pool
 * @gfp:	gfp flags when allocating the zbud pool structure
 * @ops:	user-defined operations for the zbud pool
 *
 * Return: pointer to the new zbud pool or NULL if the metadata allocation
 * failed.
 */
struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
{
	struct zbud_pool *pool;
	int i;

	pool = kmalloc(sizeof(struct zbud_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	INIT_LIST_HEAD(&pool->lru);
	pool->pages_nr = 0;
	pool->ops = ops;
	return pool;
}

/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool:	the zbud pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}

/**
 * zbud_alloc() - allocates a region of a given size
 * @pool:	zbud pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as zbud pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the allocation cannot fit in a
 * single zbud page, or -ENOMEM if the pool was unable to allocate a new
 * page.
 */
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	zhdr = NULL;
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find unbuddied zbud page, create new one */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	/* Add/move zbud page to beginning of LRU */
	if (!list_empty(&zhdr->lru))
		list_del(&zhdr->lru);
	list_add(&zhdr->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}
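
/*
 * Worked example (64-byte chunks): zbud_alloc(pool, 1000, ...) needs
 * size_to_chunks(1000) == 16 chunks, so the search above starts at
 * unbuddied[16] and walks toward unbuddied[62]; if nothing suitable is
 * found, a fresh page is allocated, the request becomes its FIRST buddy,
 * and the page lands on unbuddied[63 - 16] == unbuddied[47].
 */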

/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by zbud_alloc()
 *
 * In the case that the zbud page in which the allocation resides is under
 * reclaim, as indicated by the under_reclaim flag in the zbud header being
 * set, this function only sets the first|last_chunks to 0.  The page is
 * actually freed once both buddies are evicted (see zbud_reclaim_page()
 * below).
 */
void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* If first buddy, handle will be page aligned */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	if (zhdr->under_reclaim) {
		/* zbud page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		list_del(&zhdr->lru);
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}

#define list_tail_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

/**
 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * zbud reclaim is different from normal system reclaim in that the reclaim is
 * done from the bottom, up.  This is because only the bottom layer, zbud, has
 * information on how the allocations are organized within each zbud page. This
 * has the potential to create interesting locking situations between zbud and
 * the user, however.
 *
 * To avoid these, this is how zbud_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls zbud_reclaim_page().
 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
 * the user-defined eviction handler with the pool and handle as arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. zbud_reclaim_page() will add the zbud page back to the
 * appropriate list and try the next zbud page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called zbud_free() on the handle. zbud_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the under_reclaim flag in the zbud header.
 *
 * If all buddies in the zbud page are successfully evicted, then the
 * zbud page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
	int i, ret, freechunks;
	struct zbud_header *zhdr;
	unsigned long first_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
			retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
		list_del(&zhdr->lru);
		list_del(&zhdr->buddy);
		/* Protect zbud page against free */
		zhdr->under_reclaim = true;
		/*
		 * We need to encode the handles before unlocking, since we can
		 * race with free that will set (first|last)_chunks to 0
		 */
		first_handle = 0;
		last_handle = 0;
		if (zhdr->first_chunks)
			first_handle = encode_handle(zhdr, FIRST);
		if (zhdr->last_chunks)
			last_handle = encode_handle(zhdr, LAST);
		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s) */
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		zhdr->under_reclaim = false;
		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
			/*
			 * Both buddies are now free, free the zbud page and
			 * return success.
			 */
			free_zbud_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (zhdr->first_chunks == 0 ||
				zhdr->last_chunks == 0) {
			/* add to unbuddied list */
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		} else {
			/* add to buddied list */
			list_add(&zhdr->buddy, &pool->buddied);
		}

		/* add to beginning of LRU */
		list_add(&zhdr->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
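
/*
 * A sketch of the eviction handler contract described above (illustrative
 * only; "my_entry", "my_find_entry" and "my_writeback" are hypothetical
 * caller-side helpers, not part of zbud).  On success the handler must call
 * zbud_free() on the handle and return 0; any non-zero return tells
 * zbud_reclaim_page() to put the page back and move on to the next LRU page.
 *
 *	static int my_evict(struct zbud_pool *pool, unsigned long handle)
 *	{
 *		struct my_entry *entry = my_find_entry(handle);
 *		int ret;
 *
 *		if (!entry)
 *			return -ENOENT;
 *
 *		ret = my_writeback(entry);
 *		if (ret)
 *			return ret;
 *
 *		zbud_free(pool, handle);
 *		return 0;
 *	}
 *
 * A pool using this handler would be created with
 * zbud_create_pool(GFP_KERNEL, &ops) where ops.evict == my_evict.
 */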

/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * While trivial for zbud, the mapping functions for other allocators
 * implementing this allocation API could have more complex information encoded
 * in the handle and could create temporary mappings to make the data
 * accessible to the user.
 *
 * Returns: a pointer to the mapped allocation
 */
void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)(handle);
}

/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}

/**
 * zbud_get_pool_size() - gets the zbud pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.  The pool lock need not be
 * taken to access pages_nr.
 */
u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	return pool->pages_nr;
}

static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zbud_zpool_driver);
#endif

	return 0;
}

static void __exit exit_zbud(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zbud_zpool_driver);
#endif

	pr_info("unloaded\n");
}

module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");