/*
 * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
 * for DMA mapping framework
 *
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 */
#define pr_fmt(fmt) "kvm_cma: " fmt
#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "book3s_hv_cma.h"
struct kvm_cma {
        unsigned long   base_pfn;
        unsigned long   count;
        unsigned long   *bitmap;
};
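/*
 * A single, statically-declared area backs all hash pagetable
 * allocations; kvm_cma_mutex serialises bitmap updates between
 * concurrent allocate and release callers.
 */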
static DEFINE_MUTEX(kvm_cma_mutex);
static struct kvm_cma kvm_cma_area;
/**
 * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
 *                                for kvm hash pagetable
 * @size: Size of the reserved memory.
 * @alignment: Alignment for the contiguous memory area
 *
 * This function reserves memory for the kvm cma area. It should be
 * called by arch code while the early allocator (memblock or bootmem)
 * is still active.
 */
long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
{
        long base_pfn;
        phys_addr_t addr;
        struct kvm_cma *cma = &kvm_cma_area;

        pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);

        if (!size)
                return -EINVAL;
        /* Sanitise input arguments: CMA requires pageblock alignment. */
        alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
        size = ALIGN(size, alignment);
        /* Use __memblock_alloc_base(); memblock_alloc_base() panic()s on failure. */
        addr = __memblock_alloc_base(size, alignment, 0);
        if (!addr) {
                base_pfn = -ENOMEM;
                goto err;
        } else
                base_pfn = PFN_DOWN(addr);

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma->base_pfn = base_pfn;
        cma->count = size >> PAGE_SHIFT;
        pr_info("CMA: reserved %ld MiB\n", (unsigned long)size / SZ_1M);
        return 0;
err:
        pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return base_pfn;
}
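/*
 * Illustrative caller (a sketch, not code from this file): the arch
 * early-boot path is expected to pick a reservation size and call this
 * while memblock is still active, along the lines of:
 *
 *	align = (phys_addr_t)(PAGE_SIZE << pageblock_order);
 *	if (kvm_cma_declare_contiguous(selected_size, align))
 *		pr_err("kvm_cma: reservation failed\n");
 *
 * "selected_size" is a hypothetical variable standing in for however
 * the platform computes the hash pagetable reservation size.
 */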
/**
 * kvm_alloc_cma() - allocate pages from contiguous area
 * @nr_pages: Requested number of pages.
 * @align_pages: Requested alignment in number of pages
 *
 * This function allocates a memory buffer for the hash pagetable.
 */
struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
{
        int ret;
        struct page *page = NULL;
        struct kvm_cma *cma = &kvm_cma_area;
        unsigned long chunk_count, nr_chunk;
        unsigned long mask, pfn, pageno, start = 0;

        if (!cma || !cma->count)
                return NULL;

        pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
                 (void *)cma, nr_pages, align_pages);

        if (!nr_pages)
                return NULL;
        /*
         * Align the mask with the chunk size: each bitmap bit tracks one
         * chunk of (1 << KVM_CMA_CHUNK_ORDER) bytes, not a single page.
         */
        VM_BUG_ON(!is_power_of_2(align_pages));
        mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
        BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);

        chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
        nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
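        /*
         * Scan the chunk bitmap for a free, suitably aligned run.  If
         * alloc_contig_range() finds the range busy (e.g. transiently
         * pinned pages), retry from just past the failed position
         * rather than giving up.
         */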
        mutex_lock(&kvm_cma_mutex);
        for (;;) {
                pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
                                                    start, nr_chunk, mask);
                if (pageno >= chunk_count)
                        break;

                pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
                ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
                if (ret == 0) {
                        bitmap_set(cma->bitmap, pageno, nr_chunk);
                        page = pfn_to_page(pfn);
                        memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
                        break;
                } else if (ret != -EBUSY) {
                        break;
                }
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = pageno + mask + 1;
        }
        mutex_unlock(&kvm_cma_mutex);
        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}
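/*
 * Illustrative caller (a sketch of the calling convention, not a
 * verbatim copy of the HV setup code): the hash pagetable allocation
 * is expected to look roughly like
 *
 *	align_pages = HPT_ALIGN_PAGES;
 *	if (!cpu_has_feature(CPU_FTR_ARCH_206))
 *		align_pages = nr_pages;
 *	page = kvm_alloc_cma(nr_pages, align_pages);
 *
 * where HPT_ALIGN_PAGES names whatever HPT alignment constant the
 * caller uses; older CPUs need the HPT aligned to its own size.
 */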
/**
 * kvm_release_cma() - release allocated pages for hash pagetable
 * @pages: Allocated pages.
 * @nr_pages: Number of allocated pages.
 *
 * This function releases memory allocated by kvm_alloc_cma().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
{
        unsigned long pfn;
        unsigned long nr_chunk;
        struct kvm_cma *cma = &kvm_cma_area;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);
        pfn = page_to_pfn(pages);
        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
        nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
        mutex_lock(&kvm_cma_mutex);
        bitmap_clear(cma->bitmap,
                     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
                     nr_chunk);
        free_contig_range(pfn, nr_pages);
        mutex_unlock(&kvm_cma_mutex);
        return true;
}
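/*
 * Note: the bitmap is cleared at (1 << KVM_CMA_CHUNK_ORDER) granularity,
 * so callers are expected to pass back the same page pointer and
 * nr_pages they obtained from kvm_alloc_cma(); freeing an arbitrary
 * sub-range would leave the chunk accounting inconsistent.
 */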
static int __init kvm_cma_activate_area(unsigned long base_pfn,
                                        unsigned long count)
{
        unsigned long pfn = base_pfn;
        unsigned i = count >> pageblock_order;
        struct zone *zone;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));
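        /*
         * Walk the reserved range one pageblock at a time, validating
         * every pfn before handing each block to the buddy allocator
         * as a CMA pageblock.
         */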
        do {
                unsigned j;
                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        /* alloc_contig_range() requires the whole pfn range
                         * to be in a single zone, so force the entire CMA
                         * reservation into one zone. */
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                return -EINVAL;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
        return 0;
}
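/*
 * Deferred initialisation: core_initcall() runs once the slab allocator
 * is up, so this is the earliest point at which the chunk bitmap can be
 * kzalloc()ed and the reserved pageblocks handed to the buddy allocator
 * as CMA.
 */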
static int __init kvm_cma_init_reserved_areas(void)
{
        int bitmap_size, ret;
        unsigned long chunk_count;
        struct kvm_cma *cma = &kvm_cma_area;

        pr_debug("%s()\n", __func__);
        if (!cma->count)
                return 0;
        chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
        bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!cma->bitmap)
                return -ENOMEM;
        ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
        if (ret)
                kfree(cma->bitmap);
        return ret;
}
core_initcall(kvm_cma_init_reserved_areas);