/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/**
 * struct z3fold_header - z3fold page metadata occupying the first chunk of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS works out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is the
 * maximum number of free chunks in a z3fold page; there will also be 63 (or
 * 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
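/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096, i.e.
 * PAGE_SHIFT == 12): CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes and
 * TOTAL_CHUNKS = 4096 >> 6 = 64. If sizeof(struct z3fold_header) fits in one
 * chunk, ZHDR_SIZE_ALIGNED rounds up to 64 and ZHDR_CHUNKS = 1, which leaves
 * NCHUNKS = (4096 - 64) >> 6 = 63 chunks for objects, matching the 63
 * freelists described above.
 */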
#define BUDDY_MASK	(0x3)
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock:	protects all pool fields and first|last_chunk fields of any
 *		z3fold page in the pool
 * @unbuddied:	array of lists tracking z3fold pages that contain 2- buddies;
 *		the list each z3fold page is added to depends on the size of
 *		its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head lru;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
};
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
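/*
 * For instance (illustrative, with CHUNK_SIZE == 64): size_to_chunks(1)
 * and size_to_chunks(64) both return 1, while size_to_chunks(65) returns 2,
 * since partial chunks are rounded up.
 */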
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr;
	struct page *page;

	zhdr = container_of(ref, struct z3fold_header, refcount);
	page = virt_to_page(zhdr);

	if (!list_empty(&zhdr->buddy))
		list_del(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	free_z3fold_page(page);
}
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}
/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}
/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);

	return (handle - zhdr->first_num) & BUDDY_MASK;
}
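/*
 * Round-trip example (illustrative, using the enum buddy values defined
 * above): with first_num == 1, encode_handle(zhdr, LAST) adds
 * (3 + 1) & BUDDY_MASK == 0 to the page address, and handle_to_buddy()
 * then computes (0 - 1) & BUDDY_MASK == 3 == LAST. The underflow is masked
 * away, which is why the mismatch noted above is harmless.
 */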
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);

		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
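/*
 * Example (illustrative, assuming TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1):
 * for a page with first_chunks == 0, start_middle == 20, middle_chunks == 10
 * and last_chunks == 5, nfree_before = 20 - 1 = 19 and nfree_after = 0 (the
 * last buddy is in use), so num_free_chunks() returns 19 and the page would
 * sit on unbuddied[19].
 */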
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->lru);
	atomic64_set(&pool->pages_nr, 0);
	pool->ops = ops;
	return pool;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kfree(pool);
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;

	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		/* move the middle buddy right after the first one */
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		/* move the middle buddy right before the last one */
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;

		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
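/*
 * Compaction example (illustrative): with first_chunks == 4, ZHDR_CHUNKS == 1
 * and start_middle == 10, the gap after the first buddy is
 * 10 - (4 + 1) = 5 >= BIG_CHUNK_GAP, so the middle buddy is moved down to
 * chunk 5 and all of the page's free space ends up in one contiguous region
 * after the middle buddy.
 */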
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		chunks = size_to_chunks(size);

		/* First, try to find an unbuddied z3fold page. */
		for_each_unbuddied_list(i, chunks) {
			spin_lock(&pool->lock);
			zhdr = list_first_entry_or_null(&pool->unbuddied[i],
						struct z3fold_header, buddy);
			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				continue;
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				z3fold_page_unlock(zhdr);
				spin_lock(&pool->lock);
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				spin_unlock(&pool->lock);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				continue;
			}
			goto found;
		}
		bud = FIRST;
	}

	/* Couldn't find unbuddied z3fold page, create new one */
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		spin_lock(&pool->lock);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	spin_lock(&pool->lock);
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
	    zhdr->middle_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

headless:
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	int freechunks;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
	    zhdr->last_chunks != 0) {
		z3fold_compact_page(zhdr);
		/* Add to the unbuddied list */
		spin_lock(&pool->lock);
		if (!list_empty(&zhdr->buddy))
			list_del(&zhdr->buddy);
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		spin_unlock(&pool->lock);
	}

	z3fold_page_unlock(zhdr);
	spin_lock(&pool->lock);
	if (kref_put(&zhdr->refcount, release_z3fold_page))
		atomic64_dec(&pool->pages_nr);
	spin_unlock(&pool->lock);
}
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0, freechunks;
	struct z3fold_header *zhdr;
	struct page *page;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		page = list_last_entry(&pool->lru, struct page, lru);
		list_del_init(&page->lru);

		zhdr = page_address(page);
		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			if (!list_empty(&zhdr->buddy))
				list_del_init(&zhdr->buddy);
			kref_get(&zhdr->refcount);
			spin_unlock(&pool->lock);
			z3fold_page_lock(zhdr);
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
			spin_unlock(&pool->lock);
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				return 0;
			}
			spin_lock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			if ((zhdr->first_chunks || zhdr->last_chunks ||
			     zhdr->middle_chunks) &&
			    !(zhdr->first_chunks && zhdr->last_chunks &&
			      zhdr->middle_chunks)) {
				z3fold_compact_page(zhdr);
				/* add to unbuddied list */
				spin_lock(&pool->lock);
				freechunks = num_free_chunks(zhdr);
				list_add(&zhdr->buddy,
					 &pool->unbuddied[freechunks]);
				spin_unlock(&pool->lock);
			}

			z3fold_page_unlock(zhdr);
			spin_lock(&pool->lock);
			if (kref_put(&zhdr->refcount, release_z3fold_page)) {
				spin_unlock(&pool->lock);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
		}

		/*
		 * Add to the beginning of LRU.
		 * Pool lock has to be kept here to ensure the page has
		 * not already been released
		 */
		list_add(&page->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
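/*
 * A minimal eviction handler sketch following the protocol described above
 * (hypothetical user code, not part of this file; my_writeback() is a
 * made-up helper that writes the object back to its backing store):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *obj = z3fold_map(pool, handle);
 *		int err = my_writeback(obj);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return -EAGAIN;    // can't evict, try the next page
 *		z3fold_free(pool, handle); // required on successful eviction
 *		return 0;
 *	}
 */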
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);
	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);
	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}
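/*
 * Typical call sequence for storing an object (illustrative sketch; callers
 * normally go through the zpool wrappers below rather than calling these
 * functions directly):
 *
 *	unsigned long handle;
 *
 *	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = z3fold_map(pool, handle);
 *
 *		memcpy(dst, src, len);
 *		z3fold_unmap(pool, handle);
 *	}
 */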
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}
static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}
static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
MODULE_ALIAS("zpool-z3fold");
static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");