/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
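
/*
 * A minimal usage sketch (not part of this file): a user takes a
 * reference on the shared iova kmem_cache before setting up a domain.
 * The domain name and the 4K granule/pfn values are hypothetical.
 *
 *	static struct iova_domain my_domain;
 *
 *	if (iova_cache_get())
 *		return -ENOMEM;
 *	init_iova_domain(&my_domain, SZ_4K, 1, DMA_BIT_MASK(32) >> 12);
 */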
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			rb_entry(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = rb_entry(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = rb_entry(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}
/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
/*
 * Computes the padding size required, to make the start address
 * naturally aligned on the power-of-two order of its size
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}
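
/*
 * Worked example with hypothetical numbers: for size = 12 pfns and
 * limit_pfn = 0xfffff, the order is __roundup_pow_of_two(12) = 16, so
 * pad_size = (0x100000 - 12) & 15 = 4.  The caller then places
 * pfn_lo = limit_pfn - (12 + 4) + 1 = 0xffff0, a 16-pfn aligned address.
 */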
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = rb_entry(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);
int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);
void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
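
/*
 * A minimal usage sketch (hypothetical caller; "dom" is a domain set up
 * with init_iova_domain()): allocate a size-aligned range below the
 * 32-bit boundary, then release it with __free_iova().
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&dom, 16, dom.dma_32bit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	// ... map [iova->pfn_lo, iova->pfn_hi] in the IOMMU ...
 *	__free_iova(&dom, iova);
 */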
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	return NULL;
}
static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);
/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);
/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn)
{
	bool flushed_rcache = false;
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (flushed_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flushed_rcache = true;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);
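
/*
 * Sketch of the intended pairing (hypothetical "dom" and "nrpages"):
 * a pfn obtained here should be released with free_iova_fast() using
 * the same size, so it returns to the matching size-class cache.
 *
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(&dom, nrpages, limit_pfn);
 *	if (!pfn)
 *		return -ENOMEM;
 *	// ... use the range, e.g. for a DMA mapping ...
 *	free_iova_fast(&dom, pfn, nrpages);
 */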
/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	free_iova_rcaches(iovad);
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}
static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserver node
	 * or we need to insert the remaining non-overlapping addr range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
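
/*
 * Example (hypothetical pfns): carving a firmware- or hardware-claimed
 * window, such as a legacy graphics aperture, out of the allocatable
 * space so alloc_iova() never hands it out:
 *
 *	reserve_iova(&dom, 0xa0000 >> PAGE_SHIFT,
 *		     (0xc0000 >> PAGE_SHIFT) - 1);
 */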
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}
/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};
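
/*
 * Operation in brief: each CPU owns two magazines.  free_iova_fast()
 * pushes pfns into 'loaded'; when 'loaded' fills but 'prev' still has
 * room, the two are swapped.  Only when both are full does a CPU touch
 * the shared per-size-class depot under rcache->lock, so the common
 * case takes nothing but the local cpu_rcache->lock.
 */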
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}
static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		BUG_ON(!iova);
		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}
static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	BUG_ON(iova_magazine_empty(mag));

	if (mag->pfns[mag->size - 1] >= limit_pfn)
		return 0;

	return mag->pfns[--mag->size];
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}
static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}
/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}
static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}
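
/*
 * Size-class mapping example: order_base_2() rounds up, so sizes 1, 2,
 * 3-4 and 5-8 pfns land in classes 0, 1, 2 and 3 respectively.  With
 * IOVA_RANGE_CACHE_MAX_SIZE of 6 (per <linux/iova.h>), ranges up to 32
 * pfns are cached; larger ones always go through the rbtree.
 */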
/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}
/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
}
/*
 * Free a cpu's rcache.
 */
static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
				 struct iova_rcache *rcache)
{
	struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
	unsigned long flags;

	spin_lock_irqsave(&cpu_rcache->lock, flags);

	iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
	iova_magazine_free(cpu_rcache->loaded);

	iova_magazine_free_pfns(cpu_rcache->prev, iovad);
	iova_magazine_free(cpu_rcache->prev);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
}
/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu)
			free_cpu_iova_rcache(cpu, iovad, rcache);
		spin_lock_irqsave(&rcache->lock, flags);
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}
/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");