/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>

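/* Allocations of more than this many pages go to the large pool, if one exists. */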
static unsigned long iommu_large_alloc = 15;

static DEFINE_PER_CPU(unsigned int, iommu_hash_common);

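/*
 * IOMMU_NEED_FLUSH is the lazy-flush marker: the allocator sets it whenever a
 * pool's hint is rewound to the start of the pool, and clears it again just
 * before invoking the table's lazy_flush() callback in iommu_tbl_range_alloc().
 */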
static inline bool need_flush(struct iommu_map_table *iommu)
{
        return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
        iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
        iommu->flags &= ~IOMMU_NEED_FLUSH;
}

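/*
 * Give each possible CPU a precomputed hash of its id. iommu_tbl_range_alloc()
 * uses the per-cpu value to pick a default pool, so allocations issued from
 * different CPUs tend to contend on different pool locks.
 */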
static void setup_iommu_pool_hash(void)
{
        unsigned int i;
        static bool do_once;

        if (do_once)
                return;
        do_once = true;
        for_each_possible_cpu(i)
                per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
                         unsigned long num_entries,
                         u32 table_shift,
                         void (*lazy_flush)(struct iommu_map_table *),
                         bool large_pool, u32 npools,
                         bool skip_span_boundary_check)
{
        unsigned int start, i;
        struct iommu_pool *p = &(iommu->large_pool);

        setup_iommu_pool_hash();
        if (npools == 0)
                iommu->nr_pools = IOMMU_NR_POOLS;
        else
                iommu->nr_pools = npools;
        BUG_ON(npools > IOMMU_NR_POOLS);

        iommu->table_shift = table_shift;
        iommu->lazy_flush = lazy_flush;
        start = 0;
        if (skip_span_boundary_check)
                iommu->flags |= IOMMU_NO_SPAN_BOUND;
        if (large_pool)
                iommu->flags |= IOMMU_HAS_LARGE_POOL;

        if (!large_pool)
                iommu->poolsize = num_entries/iommu->nr_pools;
        else
                iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
        for (i = 0; i < iommu->nr_pools; i++) {
                spin_lock_init(&(iommu->pools[i].lock));
                iommu->pools[i].start = start;
                iommu->pools[i].hint = start;
                start += iommu->poolsize; /* start for next pool */
                iommu->pools[i].end = start - 1;
        }
        if (!large_pool)
                return;
        /* initialize large_pool */
        spin_lock_init(&(p->lock));
        p->start = start;
        p->hint = p->start;
        p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
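
/*
 * A minimal usage sketch, not taken from this file: callers own the bitmap
 * and the DMA base, so a hypothetical sparc-style driver would allocate
 * tbl->map and set tbl->table_map_base before handing the table to
 * iommu_tbl_pool_init(). "my_tbl", "dma_offset", "num_tsb_entries" and
 * "my_flushall" below are illustrative names only; IO_PAGE_SHIFT is the
 * sparc I/O page shift.
 *
 *      struct iommu_map_table *tbl = &my_tbl;
 *
 *      tbl->table_map_base = dma_offset;
 *      tbl->map = kzalloc(BITS_TO_LONGS(num_tsb_entries) * sizeof(long),
 *                         GFP_KERNEL);
 *      if (!tbl->map)
 *              return -ENOMEM;
 *      iommu_tbl_pool_init(tbl, num_tsb_entries, IO_PAGE_SHIFT,
 *                          my_flushall, false, 1, false);
 */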

unsigned long iommu_tbl_range_alloc(struct device *dev,
                                struct iommu_map_table *iommu,
                                unsigned long npages,
                                unsigned long *handle,
                                unsigned long mask,
                                unsigned int align_order)
{
        unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
        unsigned long n, end, start, limit, boundary_size;
        struct iommu_pool *pool;
        int pass = 0;
        unsigned int pool_nr;
        unsigned int npools = iommu->nr_pools;
        unsigned long flags;
        bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
        bool largealloc = (large_pool && npages > iommu_large_alloc);
        unsigned long shift;
        unsigned long align_mask = 0;

        if (align_order > 0)
                align_mask = ~0ul >> (BITS_PER_LONG - align_order);
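        /* e.g. align_order == 3 gives align_mask == 0x7, so the entry
         * returned below will be aligned to an 8-entry boundary.
         */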

        /* Sanity check */
        if (unlikely(npages == 0)) {
                WARN_ON_ONCE(1);
                return IOMMU_ERROR_CODE;
        }

        if (largealloc) {
                pool = &(iommu->large_pool);
                pool_nr = 0; /* to keep compiler happy */
        } else {
                /* pick out pool_nr */
                pool_nr = pool_hash & (npools - 1);
                pool = &(iommu->pools[pool_nr]);
        }
        spin_lock_irqsave(&pool->lock, flags);

 again:
        if (pass == 0 && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end
         * of the available space. If so, go back to the beginning. If a
         * flush is needed, it will get done based on the return value
         * from iommu_area_alloc() below.
         */
        if (start >= limit)
                start = pool->start;
        shift = iommu->table_map_base >> iommu->table_shift;
        if (limit + shift > mask) {
                limit = mask - shift + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(iommu->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << iommu->table_shift);
        else
                boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);

        boundary_size = boundary_size >> iommu->table_shift;
        /*
         * If skip_span_boundary_check was set during init, we set things up
         * so that iommu_is_span_boundary() merely checks that
         * (index + npages) < num_tsb_entries.
         */
        if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
                shift = 0;
                boundary_size = iommu->poolsize * iommu->nr_pools;
        }
        n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
                             boundary_size, align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First failure, rescan from the beginning. */
                        pool->hint = pool->start;
                        set_flush(iommu);
                        pass++;
                        goto again;
                } else if (!largealloc && pass <= iommu->nr_pools) {
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
                        pool = &(iommu->pools[pool_nr]);
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        set_flush(iommu);
                        pass++;
                        goto again;
                } else {
                        /* give up */
                        n = IOMMU_ERROR_CODE;
                        goto bail;
                }
        }
        if (iommu->lazy_flush &&
            (n < pool->hint || need_flush(iommu))) {
                clear_flush(iommu);
                iommu->lazy_flush(iommu);
        }

        end = n + npages;
        pool->hint = end;

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;
bail:
        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
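
/*
 * A minimal sketch of how a caller might use the returned entry, modelled on
 * the sparc users of this API; "tbl", "dev", "npages" and the "out_failed"
 * label are the caller's own (illustrative) names. Passing ~0UL as the mask
 * places no constraint on the address range:
 *
 *      unsigned long entry;
 *      dma_addr_t dma_addr;
 *
 *      entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 *                                    (unsigned long)(-1), 0);
 *      if (unlikely(entry == IOMMU_ERROR_CODE))
 *              goto out_failed;
 *      dma_addr = tbl->table_map_base + (entry << tbl->table_shift);
 */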

static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;
        bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

        /* The large pool is the last pool at the top of the table */
        if (large_pool && entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr >= tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }
        return p;
}

/* The caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping used below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
                          unsigned long npages, unsigned long entry)
{
        struct iommu_pool *pool;
        unsigned long flags;
        unsigned long shift = iommu->table_shift;

        if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
                entry = (dma_addr - iommu->table_map_base) >> shift;
        pool = get_pool(iommu, entry);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(iommu->map, entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
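
/*
 * A minimal sketch of the matching free, continuing the (illustrative)
 * alloc sketch above: passing IOMMU_ERROR_CODE as the entry lets the helper
 * derive the index from dma_addr itself.
 *
 *      iommu_tbl_range_free(tbl, dma_addr, npages, IOMMU_ERROR_CODE);
 */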