/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *      - SMMUv1 and v2 implementations
 *      - Stream-matching and stream-indexing
 *      - v7/v8 long-descriptor format
 *      - Non-secure access to the SMMU
 *      - Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS            MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS                128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS               128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)              ((smmu)->base)
#define ARM_SMMU_GR1(smmu)              ((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)                                           \
        ((smmu)->base +                                                 \
                ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)       \
                        ? 0x400 : 0))

#ifdef CONFIG_64BIT
#define smmu_writeq     writeq_relaxed
#else
#define smmu_writeq(reg64, addr)                                \
        do {                                                    \
                u64 __val = (reg64);                            \
                void __iomem *__addr = (addr);                  \
                writel_relaxed(__val >> 32, __addr + 4);        \
                writel_relaxed(__val, __addr);                  \
        } while (0)
#endif
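
/*
 * Note on the 32-bit fallback above: a 64-bit MMIO write is emulated as
 * two 32-bit writes, upper word first (__addr + 4), then lower word. The
 * combined access is therefore not atomic; this relies on the SMMU's
 * 64-bit registers tolerating being written as a pair of 32-bit halves,
 * which is our assumption here rather than something this file asserts.
 */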

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0               0x0
#define sCR0_CLIENTPD                   (1 << 0)
#define sCR0_GFRE                       (1 << 1)
#define sCR0_GFIE                       (1 << 2)
#define sCR0_GCFGFRE                    (1 << 4)
#define sCR0_GCFGFIE                    (1 << 5)
#define sCR0_USFCFG                     (1 << 10)
#define sCR0_VMIDPNE                    (1 << 11)
#define sCR0_PTM                        (1 << 12)
#define sCR0_FB                         (1 << 13)
#define sCR0_BSU_SHIFT                  14
#define sCR0_BSU_MASK                   0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0                0x20
#define ARM_SMMU_GR0_ID1                0x24
#define ARM_SMMU_GR0_ID2                0x28
#define ARM_SMMU_GR0_ID3                0x2c
#define ARM_SMMU_GR0_ID4                0x30
#define ARM_SMMU_GR0_ID5                0x34
#define ARM_SMMU_GR0_ID6                0x38
#define ARM_SMMU_GR0_ID7                0x3c
#define ARM_SMMU_GR0_sGFSR              0x48
#define ARM_SMMU_GR0_sGFSYNR0           0x50
#define ARM_SMMU_GR0_sGFSYNR1           0x54
#define ARM_SMMU_GR0_sGFSYNR2           0x58

#define ID0_S1TS                        (1 << 30)
#define ID0_S2TS                        (1 << 29)
#define ID0_NTS                         (1 << 28)
#define ID0_SMS                         (1 << 27)
#define ID0_ATOSNS                      (1 << 26)
#define ID0_CTTW                        (1 << 14)
#define ID0_NUMIRPT_SHIFT               16
#define ID0_NUMIRPT_MASK                0xff
#define ID0_NUMSIDB_SHIFT               9
#define ID0_NUMSIDB_MASK                0xf
#define ID0_NUMSMRG_SHIFT               0
#define ID0_NUMSMRG_MASK                0xff

#define ID1_PAGESIZE                    (1 << 31)
#define ID1_NUMPAGENDXB_SHIFT           28
#define ID1_NUMPAGENDXB_MASK            7
#define ID1_NUMS2CB_SHIFT               16
#define ID1_NUMS2CB_MASK                0xff
#define ID1_NUMCB_SHIFT                 0
#define ID1_NUMCB_MASK                  0xff

#define ID2_OAS_SHIFT                   4
#define ID2_OAS_MASK                    0xf
#define ID2_IAS_SHIFT                   0
#define ID2_IAS_MASK                    0xf
#define ID2_UBS_SHIFT                   8
#define ID2_UBS_MASK                    0xf
#define ID2_PTFS_4K                     (1 << 12)
#define ID2_PTFS_16K                    (1 << 13)
#define ID2_PTFS_64K                    (1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID           0x64
#define ARM_SMMU_GR0_TLBIALLNSNH        0x68
#define ARM_SMMU_GR0_TLBIALLH           0x6c
#define ARM_SMMU_GR0_sTLBGSYNC          0x70
#define ARM_SMMU_GR0_sTLBGSTATUS        0x74
#define sTLBGSTATUS_GSACTIVE            (1 << 0)
#define TLB_LOOP_TIMEOUT                1000000 /* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)             (0x800 + ((n) << 2))
#define SMR_VALID                       (1 << 31)
#define SMR_MASK_SHIFT                  16
#define SMR_MASK_MASK                   0x7fff
#define SMR_ID_SHIFT                    0
#define SMR_ID_MASK                     0x7fff

#define ARM_SMMU_GR0_S2CR(n)            (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT                0
#define S2CR_CBNDX_MASK                 0xff
#define S2CR_TYPE_SHIFT                 16
#define S2CR_TYPE_MASK                  0x3
#define S2CR_TYPE_TRANS                 (0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS                (1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT                 (2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)            (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT                 0
#define CBAR_VMID_MASK                  0xff
#define CBAR_S1_BPSHCFG_SHIFT           8
#define CBAR_S1_BPSHCFG_MASK            3
#define CBAR_S1_BPSHCFG_NSH             3
#define CBAR_S1_MEMATTR_SHIFT           12
#define CBAR_S1_MEMATTR_MASK            0xf
#define CBAR_S1_MEMATTR_WB              0xf
#define CBAR_TYPE_SHIFT                 16
#define CBAR_TYPE_MASK                  0x3
#define CBAR_TYPE_S2_TRANS              (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS    (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT     (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS     (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT              24
#define CBAR_IRPTNDX_MASK               0xff

#define ARM_SMMU_GR1_CBA2R(n)           (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT                (0 << 0)
#define CBA2R_RW64_64BIT                (1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)          ((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)            ((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR               0x0
#define ARM_SMMU_CB_RESUME              0x8
#define ARM_SMMU_CB_TTBCR2              0x10
#define ARM_SMMU_CB_TTBR0               0x20
#define ARM_SMMU_CB_TTBR1               0x28
#define ARM_SMMU_CB_TTBCR               0x30
#define ARM_SMMU_CB_S1_MAIR0            0x38
#define ARM_SMMU_CB_S1_MAIR1            0x3c
#define ARM_SMMU_CB_PAR_LO              0x50
#define ARM_SMMU_CB_PAR_HI              0x54
#define ARM_SMMU_CB_FSR                 0x58
#define ARM_SMMU_CB_FAR_LO              0x60
#define ARM_SMMU_CB_FAR_HI              0x64
#define ARM_SMMU_CB_FSYNR0              0x68
#define ARM_SMMU_CB_S1_TLBIVA           0x600
#define ARM_SMMU_CB_S1_TLBIASID         0x610
#define ARM_SMMU_CB_S1_TLBIVAL          0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2        0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L       0x638
#define ARM_SMMU_CB_ATS1PR              0x800
#define ARM_SMMU_CB_ATSR                0x8f0

#define SCTLR_S1_ASIDPNE                (1 << 12)
#define SCTLR_CFCFG                     (1 << 7)
#define SCTLR_CFIE                      (1 << 6)
#define SCTLR_CFRE                      (1 << 5)
#define SCTLR_E                         (1 << 4)
#define SCTLR_AFE                       (1 << 2)
#define SCTLR_TRE                       (1 << 1)
#define SCTLR_M                         (1 << 0)
#define SCTLR_EAE_SBOP                  (SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F                        (1 << 0)

#define ATSR_ACTIVE                     (1 << 0)

#define RESUME_RETRY                    (0 << 0)
#define RESUME_TERMINATE                (1 << 0)

#define TTBCR2_SEP_SHIFT                15
#define TTBCR2_SEP_UPSTREAM             (0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT                48

#define FSR_MULTI                       (1 << 31)
#define FSR_SS                          (1 << 30)
#define FSR_UUT                         (1 << 8)
#define FSR_ASF                         (1 << 7)
#define FSR_TLBLKF                      (1 << 6)
#define FSR_TLBMCF                      (1 << 5)
#define FSR_EF                          (1 << 4)
#define FSR_PF                          (1 << 3)
#define FSR_AFF                         (1 << 2)
#define FSR_TF                          (1 << 1)

#define FSR_IGN                         (FSR_AFF | FSR_ASF | \
                                         FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT                       (FSR_MULTI | FSR_SS | FSR_UUT | \
                                         FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR                      (1 << 4)

static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
        "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
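
/*
 * Illustrative usage (not part of this file): restrict the driver to
 * stage-2 translation by passing "arm-smmu.force_stage=2" on the kernel
 * command line, or "force_stage=2" when loading as a module. The value
 * is read-only at runtime (S_IRUGO).
 */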

enum arm_smmu_arch_version {
        ARM_SMMU_V1 = 1,
        ARM_SMMU_V2,
};

struct arm_smmu_smr {
        u8                              idx;
        u16                             mask;
        u16                             id;
};

struct arm_smmu_master_cfg {
        int                             num_streamids;
        u16                             streamids[MAX_MASTER_STREAMIDS];
        struct arm_smmu_smr             *smrs;
};

struct arm_smmu_master {
        struct device_node              *of_node;
        struct rb_node                  node;
        struct arm_smmu_master_cfg      cfg;
};

struct arm_smmu_device {
        struct device                   *dev;

        void __iomem                    *base;
        unsigned long                   size;
        unsigned long                   pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK     (1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH      (1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1          (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2          (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED      (1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS         (1 << 5)
        u32                             features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
        u32                             options;
        enum arm_smmu_arch_version      version;

        u32                             num_context_banks;
        u32                             num_s2_context_banks;
        DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
        atomic_t                        irptndx;

        u32                             num_mapping_groups;
        DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

        unsigned long                   va_size;
        unsigned long                   ipa_size;
        unsigned long                   pa_size;

        u32                             num_global_irqs;
        u32                             num_context_irqs;
        unsigned int                    *irqs;

        struct list_head                list;
        struct rb_root                  masters;
};

struct arm_smmu_cfg {
        u8                              cbndx;
        u8                              irptndx;
        u32                             cbar;
};
#define INVALID_IRPTNDX                 0xff

#define ARM_SMMU_CB_ASID(cfg)           ((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)           ((cfg)->cbndx + 1)
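
/*
 * ASIDs and VMIDs are derived directly from the context bank index, so
 * they are unique within one SMMU. The +1 for VMIDs keeps VMID 0 out of
 * the allocator; we assume this is to avoid clashing with bypassing
 * streams (whose S2CR VMID field resets to 0), though the driver does
 * not state the reason explicitly.
 */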

enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
        struct arm_smmu_device          *smmu;
        struct io_pgtable_ops           *pgtbl_ops;
        spinlock_t                      pgtbl_lock;
        struct arm_smmu_cfg             cfg;
        enum arm_smmu_domain_stage      stage;
        struct mutex                    init_mutex; /* Protects smmu pointer */
        struct iommu_domain             domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
        u32 opt;
        const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
        { 0, NULL },
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
        int i = 0;

        do {
                if (of_property_read_bool(smmu->dev->of_node,
                                                arm_smmu_options[i].prop)) {
                        smmu->options |= arm_smmu_options[i].opt;
                        dev_notice(smmu->dev, "option %s\n",
                                arm_smmu_options[i].prop);
                }
        } while (arm_smmu_options[++i].opt);
}
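
/*
 * Illustrative device-tree fragment (node name, address and compatible
 * string assumed) setting the only option currently defined:
 *
 *      smmu@e0200000 {
 *              compatible = "arm,smmu-v1";
 *              calxeda,smmu-secure-config-access;
 *              ...
 *      };
 */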

static struct device_node *dev_get_dev_node(struct device *dev)
{
        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;

                while (!pci_is_root_bus(bus))
                        bus = bus->parent;
                return bus->bridge->parent->of_node;
        }

        return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
                                                struct device_node *dev_node)
{
        struct rb_node *node = smmu->masters.rb_node;

        while (node) {
                struct arm_smmu_master *master;

                master = container_of(node, struct arm_smmu_master, node);

                if (dev_node < master->of_node)
                        node = node->rb_left;
                else if (dev_node > master->of_node)
                        node = node->rb_right;
                else
                        return master;
        }

        return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
        struct arm_smmu_master_cfg *cfg = NULL;
        struct iommu_group *group = iommu_group_get(dev);

        if (group) {
                cfg = iommu_group_get_iommudata(group);
                iommu_group_put(group);
        }

        return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
                              struct arm_smmu_master *master)
{
        struct rb_node **new, *parent;

        new = &smmu->masters.rb_node;
        parent = NULL;
        while (*new) {
                struct arm_smmu_master *this
                        = container_of(*new, struct arm_smmu_master, node);

                parent = *new;
                if (master->of_node < this->of_node)
                        new = &((*new)->rb_left);
                else if (master->of_node > this->of_node)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        rb_link_node(&master->node, parent, new);
        rb_insert_color(&master->node, &smmu->masters);
        return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
                                struct device *dev,
                                struct of_phandle_args *masterspec)
{
        int i;
        struct arm_smmu_master *master;

        master = find_smmu_master(smmu, masterspec->np);
        if (master) {
                dev_err(dev,
                        "rejecting multiple registrations for master device %s\n",
                        masterspec->np->name);
                return -EBUSY;
        }

        if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
                dev_err(dev,
                        "reached maximum number (%d) of stream IDs for master device %s\n",
                        MAX_MASTER_STREAMIDS, masterspec->np->name);
                return -ENOSPC;
        }

        master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
        if (!master)
                return -ENOMEM;

        master->of_node                 = masterspec->np;
        master->cfg.num_streamids       = masterspec->args_count;

        for (i = 0; i < master->cfg.num_streamids; ++i) {
                u16 streamid = masterspec->args[i];

                if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
                     (streamid >= smmu->num_mapping_groups)) {
                        dev_err(dev,
                                "stream ID for master device %s greater than maximum allowed (%d)\n",
                                masterspec->np->name, smmu->num_mapping_groups);
                        return -ERANGE;
                }
                master->cfg.streamids[i] = streamid;
        }
        return insert_smmu_master(smmu, master);
}
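
/*
 * The masterspec above comes from the legacy "mmu-masters" binding, in
 * which the SMMU node lists each master phandle followed by its stream
 * IDs. An illustrative fragment (phandles and ID values invented):
 *
 *      mmu-masters = <&dma0 0xd01d 0xd01e>,
 *                    <&gpu 0x0400>;
 */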

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
        struct arm_smmu_device *smmu;
        struct arm_smmu_master *master = NULL;
        struct device_node *dev_node = dev_get_dev_node(dev);

        spin_lock(&arm_smmu_devices_lock);
        list_for_each_entry(smmu, &arm_smmu_devices, list) {
                master = find_smmu_master(smmu, dev_node);
                if (master)
                        break;
        }
        spin_unlock(&arm_smmu_devices_lock);

        return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}
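
/*
 * The allocator above is lock-free: find_next_zero_bit() proposes a
 * candidate index and test_and_set_bit() claims it atomically, looping
 * to pick another index if a concurrent caller won the race for the
 * same bit.
 */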

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
        int count = 0;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
        while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
               & sTLBGSTATUS_GSACTIVE) {
                cpu_relax();
                if (++count == TLB_LOOP_TIMEOUT) {
                        dev_err_ratelimited(smmu->dev,
                        "TLB sync timed out -- SMMU may be deadlocked\n");
                        return;
                }
                udelay(1);
        }
}

static void arm_smmu_tlb_sync(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        __arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        void __iomem *base;

        if (stage1) {
                base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
                writel_relaxed(ARM_SMMU_CB_ASID(cfg),
                               base + ARM_SMMU_CB_S1_TLBIASID);
        } else {
                base = ARM_SMMU_GR0(smmu);
                writel_relaxed(ARM_SMMU_CB_VMID(cfg),
                               base + ARM_SMMU_GR0_TLBIVMID);
        }

        __arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                                          bool leaf, void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        void __iomem *reg;

        if (stage1) {
                reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
                reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

                if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
                        /* TLBIVA takes VA[31:12], with the ASID in the low bits */
                        iova &= ~0xfffUL;
                        iova |= ARM_SMMU_CB_ASID(cfg);
                        writel_relaxed(iova, reg);
#ifdef CONFIG_64BIT
                } else {
                        iova >>= 12;
                        iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
                        writeq_relaxed(iova, reg);
#endif
                }
#ifdef CONFIG_64BIT
        } else if (smmu->version == ARM_SMMU_V2) {
                reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
                reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
                              ARM_SMMU_CB_S2_TLBIIPAS2;
                writeq_relaxed(iova >> 12, reg);
#endif
        } else {
                reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
                writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
        }
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
        int flags, ret;
        u32 fsr, far, fsynr, resume;
        unsigned long iova;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base;

        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
        fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

        if (!(fsr & FSR_FAULT))
                return IRQ_NONE;

        if (fsr & FSR_IGN)
                dev_err_ratelimited(smmu->dev,
                                    "Unexpected context fault (fsr 0x%x)\n",
                                    fsr);

        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
        flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

        far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
        iova = far;
#ifdef CONFIG_64BIT
        far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
        iova |= ((unsigned long)far << 32);
#endif

        if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
                ret = IRQ_HANDLED;
                resume = RESUME_RETRY;
        } else {
                dev_err_ratelimited(smmu->dev,
                    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
                    iova, fsynr, cfg->cbndx);
                ret = IRQ_NONE;
                resume = RESUME_TERMINATE;
        }

        /* Clear the faulting FSR */
        writel(fsr, cb_base + ARM_SMMU_CB_FSR);

        /* Retry or terminate any stalled transactions */
        if (fsr & FSR_SS)
                writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

        return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
        u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
        struct arm_smmu_device *smmu = dev;
        void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

        gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
        gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
        gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
        gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

        if (!gfsr)
                return IRQ_NONE;

        dev_err_ratelimited(smmu->dev,
                "Unexpected global fault, this could be serious\n");
        dev_err_ratelimited(smmu->dev,
                "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
                gfsr, gfsynr0, gfsynr1, gfsynr2);

        writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
        return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                                       struct io_pgtable_cfg *pgtbl_cfg)
{
        u32 reg;
        u64 reg64;
        bool stage1;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base, *gr1_base;

        gr1_base = ARM_SMMU_GR1(smmu);
        stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

        if (smmu->version > ARM_SMMU_V1) {
                /*
                 * CBA2R.
                 * *Must* be initialised before CBAR thanks to a VMID16
                 * architectural oversight that affected some implementations.
                 */
#ifdef CONFIG_64BIT
                reg = CBA2R_RW64_64BIT;
#else
                reg = CBA2R_RW64_32BIT;
#endif
                writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
        }

        /* CBAR */
        reg = cfg->cbar;
        if (smmu->version == ARM_SMMU_V1)
                reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

        /*
         * Use the weakest shareability/memory types, so they are
         * overridden by the ttbcr/pte.
         */
        if (stage1) {
                reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
                        (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
        } else {
                reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
        }
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

        /* TTBRs */
        if (stage1) {
                reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

                reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
                smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

                reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
                reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
                smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
        } else {
                reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
                smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
        }

        /* TTBCR */
        if (stage1) {
                reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
                if (smmu->version > ARM_SMMU_V1) {
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
                        reg |= TTBCR2_SEP_UPSTREAM;
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
                }
        } else {
                reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
        }

        /* MAIRs (stage-1 only) */
        if (stage1) {
                reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
                reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
        }

        /* SCTLR */
        reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
        if (stage1)
                reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
        reg |= SCTLR_E;
#endif
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                        struct arm_smmu_device *smmu)
{
        int irq, start, ret = 0;
        unsigned long ias, oas;
        struct io_pgtable_ops *pgtbl_ops;
        struct io_pgtable_cfg pgtbl_cfg;
        enum io_pgtable_fmt fmt;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

        mutex_lock(&smmu_domain->init_mutex);
        if (smmu_domain->smmu)
                goto out_unlock;

        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without
         * support for nested translation. That means we end up with the
         * following table:
         *
         * Requested        Supported        Actual
         *     S1               N              S1
         *     S1             S1+S2            S1
         *     S1               S2             S2
         *     S1               S1             S1
         *     N                N              N
         *     N              S1+S2            S2
         *     N                S2             S2
         *     N                S1             S1
         *
         * Note that you can't actually request stage-2 mappings.
         */
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

        switch (smmu_domain->stage) {
        case ARM_SMMU_DOMAIN_S1:
                cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
                start = smmu->num_s2_context_banks;
                ias = smmu->va_size;
                oas = smmu->ipa_size;
                if (IS_ENABLED(CONFIG_64BIT))
                        fmt = ARM_64_LPAE_S1;
                else
                        fmt = ARM_32_LPAE_S1;
                break;
        case ARM_SMMU_DOMAIN_NESTED:
                /*
                 * We will likely want to change this if/when KVM gets
                 * involved.
                 */
        case ARM_SMMU_DOMAIN_S2:
                cfg->cbar = CBAR_TYPE_S2_TRANS;
                start = 0;
                ias = smmu->ipa_size;
                oas = smmu->pa_size;
                if (IS_ENABLED(CONFIG_64BIT))
                        fmt = ARM_64_LPAE_S2;
                else
                        fmt = ARM_32_LPAE_S2;
                break;
        default:
                ret = -EINVAL;
                goto out_unlock;
        }

        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (IS_ERR_VALUE(ret))
                goto out_unlock;

        cfg->cbndx = ret;
        if (smmu->version == ARM_SMMU_V1) {
                cfg->irptndx = atomic_inc_return(&smmu->irptndx);
                cfg->irptndx %= smmu->num_context_irqs;
        } else {
                cfg->irptndx = cfg->cbndx;
        }

        pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = arm_smmu_ops.pgsize_bitmap,
                .ias            = ias,
                .oas            = oas,
                .tlb            = &arm_smmu_gather_ops,
                .iommu_dev      = smmu->dev,
        };

        smmu_domain->smmu = smmu;
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        if (!pgtbl_ops) {
                ret = -ENOMEM;
                goto out_clear_smmu;
        }

        /* Update our support page sizes to reflect the page table format */
        arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

        /* Initialise the context bank with our page table cfg */
        arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

        /*
         * Request context fault interrupt. Do this last to avoid the
         * handler seeing a half-initialised domain state.
         */
        irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
        ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
                          "arm-smmu-context-fault", domain);
        if (IS_ERR_VALUE(ret)) {
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
                cfg->irptndx = INVALID_IRPTNDX;
        }

        mutex_unlock(&smmu_domain->init_mutex);

        /* Publish page table ops for map/unmap */
        smmu_domain->pgtbl_ops = pgtbl_ops;
        return 0;

out_clear_smmu:
        smmu_domain->smmu = NULL;
out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        void __iomem *cb_base;
        int irq;

        if (!smmu)
                return;

        /*
         * Disable the context bank and free the page tables before freeing
         * it.
         */
        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
        writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

        if (cfg->irptndx != INVALID_IRPTNDX) {
                irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
                free_irq(irq, domain);
        }

        if (smmu_domain->pgtbl_ops)
                free_io_pgtable_ops(smmu_domain->pgtbl_ops);

        __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
        struct arm_smmu_domain *smmu_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
         * master.
         */
        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
                return NULL;

        mutex_init(&smmu_domain->init_mutex);
        spin_lock_init(&smmu_domain->pgtbl_lock);

        return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        /*
         * Free the domain resources. We assume that all devices have
         * already been detached.
         */
        arm_smmu_destroy_domain_context(domain);
        kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
                                          struct arm_smmu_master_cfg *cfg)
{
        int i;
        struct arm_smmu_smr *smrs;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

        if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
                return 0;

        if (cfg->smrs)
                return -EEXIST;

        smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
        if (!smrs) {
                dev_err(smmu->dev, "failed to allocate %d SMRs\n",
                        cfg->num_streamids);
                return -ENOMEM;
        }

        /* Allocate the SMRs on the SMMU */
        for (i = 0; i < cfg->num_streamids; ++i) {
                int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
                                                  smmu->num_mapping_groups);
                if (IS_ERR_VALUE(idx)) {
                        dev_err(smmu->dev, "failed to allocate free SMR\n");
                        goto err_free_smrs;
                }

                smrs[i] = (struct arm_smmu_smr) {
                        .idx    = idx,
                        .mask   = 0, /* We don't currently share SMRs */
                        .id     = cfg->streamids[i],
                };
        }

        /* It worked! Now, poke the actual hardware */
        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
                          smrs[i].mask << SMR_MASK_SHIFT;
                writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
        }

        cfg->smrs = smrs;
        return 0;

err_free_smrs:
        while (--i >= 0)
                __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
        kfree(smrs);
        return -ENOSPC;
}
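
/*
 * Worked example of the SMR encoding above (values invented): an entry
 * with idx 3, id 0x42 and mask 0 programs SMR3 with
 * SMR_VALID | (0 << SMR_MASK_SHIFT) | 0x42 == 0x80000042, which matches
 * stream ID 0x42 exactly, since a zero mask means no ID bits are ignored
 * during matching.
 */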

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
                                      struct arm_smmu_master_cfg *cfg)
{
        int i;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        struct arm_smmu_smr *smrs = cfg->smrs;

        if (!smrs)
                return;

        /* Invalidate the SMRs before freeing back to the allocator */
        for (i = 0; i < cfg->num_streamids; ++i) {
                u8 idx = smrs[i].idx;

                writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
                __arm_smmu_free_bitmap(smmu->smr_map, idx);
        }

        cfg->smrs = NULL;
        kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
                                      struct arm_smmu_master_cfg *cfg)
{
        int i, ret;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

        /* Devices in an IOMMU group may already be configured */
        ret = arm_smmu_master_configure_smrs(smmu, cfg);
        if (ret)
                return ret == -EEXIST ? 0 : ret;

        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 idx, s2cr;

                idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
                s2cr = S2CR_TYPE_TRANS |
                       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
                writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
        }

        return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
                                          struct arm_smmu_master_cfg *cfg)
{
        int i;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

        /* An IOMMU group is torn down by the first device to be removed */
        if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
                return;

        /*
         * We *must* clear the S2CR first, because freeing the SMR means
         * that it can be re-allocated immediately.
         */
        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

                writel_relaxed(S2CR_TYPE_BYPASS,
                               gr0_base + ARM_SMMU_GR0_S2CR(idx));
        }

        arm_smmu_master_free_smrs(smmu, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu;
        struct arm_smmu_master_cfg *cfg;

        smmu = find_smmu_for_device(dev);
        if (!smmu) {
                dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
                return -ENXIO;
        }

        if (dev->archdata.iommu) {
                dev_err(dev, "already attached to IOMMU domain\n");
                return -EEXIST;
        }

        /* Ensure that the domain is finalised */
        ret = arm_smmu_init_domain_context(domain, smmu);
        if (IS_ERR_VALUE(ret))
                return ret;

        /*
         * Sanity check the domain. We don't support domains across
         * different SMMUs.
         */
        if (smmu_domain->smmu != smmu) {
                dev_err(dev,
                        "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
                        dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
                return -EINVAL;
        }

        /* Looks ok, so add the device to the domain */
        cfg = find_smmu_master_cfg(dev);
        if (!cfg)
                return -ENODEV;

        ret = arm_smmu_domain_add_master(smmu_domain, cfg);
        if (!ret)
                dev->archdata.iommu = domain;
        return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_master_cfg *cfg;

        cfg = find_smmu_master_cfg(dev);
        if (!cfg)
                return;

        dev->archdata.iommu = NULL;
        arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        int ret;
        unsigned long flags;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

        if (!ops)
                return -ENODEV;

        spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
        ret = ops->map(ops, iova, paddr, size, prot);
        spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
        return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                             size_t size)
{
        size_t ret;
        unsigned long flags;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

        if (!ops)
                return 0;

        spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
        ret = ops->unmap(ops, iova, size);
        spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
        return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
                                              dma_addr_t iova)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
        struct device *dev = smmu->dev;
        void __iomem *cb_base;
        u32 tmp;
        u64 phys;
        unsigned long va;

        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

        /* ATS1 registers can only be written atomically */
        va = iova & ~0xfffUL;
        if (smmu->version == ARM_SMMU_V2)
                smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
        else
                writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

        if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
                                      !(tmp & ATSR_ACTIVE), 5, 50)) {
                dev_err(dev,
                        "iova to phys timed out on %pad. Falling back to software table walk.\n",
                        &iova);
                return ops->iova_to_phys(ops, iova);
        }

        phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
        phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

        if (phys & CB_PAR_F) {
                dev_err(dev, "translation fault!\n");
                dev_err(dev, "PAR = 0x%llx\n", phys);
                return 0;
        }

        return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
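
/*
 * Worked example of the PAR maths above (addresses invented): for
 * iova 0x10005abc mapping to a page at physical 0x880005000, PAR holds
 * the page frame plus attribute bits in its low bits; masking with
 * GENMASK_ULL(39, 12) keeps the 40-bit page frame (0x880005000) and the
 * low 12 bits of the iova supply the page offset, giving 0x880005abc.
 */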
1244
1245 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1246                                         dma_addr_t iova)
1247 {
1248         phys_addr_t ret;
1249         unsigned long flags;
1250         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1251         struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1252
1253         if (!ops)
1254                 return 0;
1255
1256         spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1257         if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1258                         smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1259                 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1260         } else {
1261                 ret = ops->iova_to_phys(ops, iova);
1262         }
1263
1264         spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1265
1266         return ret;
1267 }
1268
1269 static bool arm_smmu_capable(enum iommu_cap cap)
1270 {
1271         switch (cap) {
1272         case IOMMU_CAP_CACHE_COHERENCY:
1273                 /*
1274                  * Return true here as the SMMU can always send out coherent
1275                  * requests.
1276                  */
1277                 return true;
1278         case IOMMU_CAP_INTR_REMAP:
1279                 return true; /* MSIs are just memory writes */
1280         case IOMMU_CAP_NOEXEC:
1281                 return true;
1282         default:
1283                 return false;
1284         }
1285 }
1286
1287 static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1288 {
1289         *((u16 *)data) = alias;
1290         return 0; /* Continue walking */
1291 }
1292
1293 static void __arm_smmu_release_pci_iommudata(void *data)
1294 {
1295         kfree(data);
1296 }
1297
1298 static int arm_smmu_add_pci_device(struct pci_dev *pdev)
1299 {
1300         int i, ret;
1301         u16 sid;
1302         struct iommu_group *group;
1303         struct arm_smmu_master_cfg *cfg;
1304
1305         group = iommu_group_get_for_dev(&pdev->dev);
1306         if (IS_ERR(group))
1307                 return PTR_ERR(group);
1308
1309         cfg = iommu_group_get_iommudata(group);
1310         if (!cfg) {
1311                 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1312                 if (!cfg) {
1313                         ret = -ENOMEM;
1314                         goto out_put_group;
1315                 }
1316
1317                 iommu_group_set_iommudata(group, cfg,
1318                                           __arm_smmu_release_pci_iommudata);
1319         }
1320
1321         if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
1322                 ret = -ENOSPC;
1323                 goto out_put_group;
1324         }
1325
1326         /*
1327          * Assume Stream ID == Requester ID for now.
1328          * We need a way to describe the ID mappings in FDT.
1329          */
1330         pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1331         for (i = 0; i < cfg->num_streamids; ++i)
1332                 if (cfg->streamids[i] == sid)
1333                         break;
1334
1335         /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1336         if (i == cfg->num_streamids)
1337                 cfg->streamids[cfg->num_streamids++] = sid;
1338
1339         return 0;
1340 out_put_group:
1341         iommu_group_put(group);
1342         return ret;
1343 }
1344
1345 static int arm_smmu_add_platform_device(struct device *dev)
1346 {
1347         struct iommu_group *group;
1348         struct arm_smmu_master *master;
1349         struct arm_smmu_device *smmu = find_smmu_for_device(dev);
1350
1351         if (!smmu)
1352                 return -ENODEV;
1353
1354         master = find_smmu_master(smmu, dev->of_node);
1355         if (!master)
1356                 return -ENODEV;
1357
1358         /* No automatic group creation for platform devices */
1359         group = iommu_group_alloc();
1360         if (IS_ERR(group))
1361                 return PTR_ERR(group);
1362
1363         iommu_group_set_iommudata(group, &master->cfg, NULL);
1364         return iommu_group_add_device(group, dev);
1365 }
1366
1367 static int arm_smmu_add_device(struct device *dev)
1368 {
1369         if (dev_is_pci(dev))
1370                 return arm_smmu_add_pci_device(to_pci_dev(dev));
1371
1372         return arm_smmu_add_platform_device(dev);
1373 }
1374
1375 static void arm_smmu_remove_device(struct device *dev)
1376 {
1377         iommu_group_remove_device(dev);
1378 }
1379
1380 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1381                                     enum iommu_attr attr, void *data)
1382 {
1383         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1384
1385         switch (attr) {
1386         case DOMAIN_ATTR_NESTING:
1387                 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1388                 return 0;
1389         default:
1390                 return -ENODEV;
1391         }
1392 }
1393
1394 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1395                                     enum iommu_attr attr, void *data)
1396 {
1397         int ret = 0;
1398         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1399
1400         mutex_lock(&smmu_domain->init_mutex);
1401
1402         switch (attr) {
1403         case DOMAIN_ATTR_NESTING:
1404                 if (smmu_domain->smmu) {
1405                         ret = -EPERM;
1406                         goto out_unlock;
1407                 }
1408
1409                 if (*(int *)data)
1410                         smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1411                 else
1412                         smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1413
1414                 break;
1415         default:
1416                 ret = -ENODEV;
1417         }
1418
1419 out_unlock:
1420         mutex_unlock(&smmu_domain->init_mutex);
1421         return ret;
1422 }
1423
1424 static struct iommu_ops arm_smmu_ops = {
1425         .capable                = arm_smmu_capable,
1426         .domain_alloc           = arm_smmu_domain_alloc,
1427         .domain_free            = arm_smmu_domain_free,
1428         .attach_dev             = arm_smmu_attach_dev,
1429         .detach_dev             = arm_smmu_detach_dev,
1430         .map                    = arm_smmu_map,
1431         .unmap                  = arm_smmu_unmap,
1432         .map_sg                 = default_iommu_map_sg,
1433         .iova_to_phys           = arm_smmu_iova_to_phys,
1434         .add_device             = arm_smmu_add_device,
1435         .remove_device          = arm_smmu_remove_device,
1436         .domain_get_attr        = arm_smmu_domain_get_attr,
1437         .domain_set_attr        = arm_smmu_domain_set_attr,
1438         .pgsize_bitmap          = -1UL, /* Restricted during device attach */
1439 };
1440
1441 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1442 {
1443         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1444         void __iomem *cb_base;
1445         int i = 0;
1446         u32 reg;
1447
1448         /* clear global FSR */
1449         reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1450         writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1451
1452         /* Mark all SMRn as invalid and all S2CRn as bypass */
1453         for (i = 0; i < smmu->num_mapping_groups; ++i) {
1454                 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
1455                 writel_relaxed(S2CR_TYPE_BYPASS,
1456                         gr0_base + ARM_SMMU_GR0_S2CR(i));
1457         }
1458
1459         /* Make sure all context banks are disabled and clear CB_FSR */
1460         for (i = 0; i < smmu->num_context_banks; ++i) {
1461                 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1462                 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1463                 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
1464         }
1465
1466         /* Invalidate the TLB, just in case */
1467         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1468         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1469
1470         reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1471
1472         /* Enable fault reporting */
1473         reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
1474
1475         /* Disable TLB broadcasting. */
1476         reg |= (sCR0_VMIDPNE | sCR0_PTM);
1477
1478         /* Enable client access, but bypass when no mapping is found */
1479         reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
1480
1481         /* Disable forced broadcasting */
1482         reg &= ~sCR0_FB;
1483
1484         /* Don't upgrade barriers */
1485         reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
1486
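             /*
              * All of the register writes above used writel_relaxed();
              * complete any pending TLB invalidation first, then use a
              * non-relaxed writel() for sCR0 so that the new configuration
              * is observed by the SMMU before translation is enabled.
              */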
1487         /* Push the button */
1488         __arm_smmu_tlb_sync(smmu);
1489         writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1490 }
1491
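     /*
      * ID2 encodes the input/output/upstream address sizes (IAS/OAS/UBS) as
      * 3-bit fields using the same encoding as the ARMv8 PARange field;
      * decode them into a width in bits.
      */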
1492 static int arm_smmu_id_size_to_bits(int size)
1493 {
1494         switch (size) {
1495         case 0:
1496                 return 32;
1497         case 1:
1498                 return 36;
1499         case 2:
1500                 return 40;
1501         case 3:
1502                 return 42;
1503         case 4:
1504                 return 44;
1505         case 5:
1506         default:
1507                 return 48;
1508         }
1509 }
1510
1511 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1512 {
1513         unsigned long size;
1514         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1515         u32 id;
1516         bool cttw_dt, cttw_reg;
1517
1518         dev_notice(smmu->dev, "probing hardware configuration...\n");
1519         dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
1520
1521         /* ID0 */
1522         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
1523
1524         /* Restrict available stages based on module parameter */
1525         if (force_stage == 1)
1526                 id &= ~(ID0_S2TS | ID0_NTS);
1527         else if (force_stage == 2)
1528                 id &= ~(ID0_S1TS | ID0_NTS);
1529
1530         if (id & ID0_S1TS) {
1531                 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1532                 dev_notice(smmu->dev, "\tstage 1 translation\n");
1533         }
1534
1535         if (id & ID0_S2TS) {
1536                 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1537                 dev_notice(smmu->dev, "\tstage 2 translation\n");
1538         }
1539
1540         if (id & ID0_NTS) {
1541                 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1542                 dev_notice(smmu->dev, "\tnested translation\n");
1543         }
1544
1545         if (!(smmu->features &
1546                 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1547                 dev_err(smmu->dev, "\tno translation support!\n");
1548                 return -ENODEV;
1549         }
1550
1551         if ((id & ID0_S1TS) && ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
1552                 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1553                 dev_notice(smmu->dev, "\taddress translation ops\n");
1554         }
1555
1556         /*
1557          * In order for DMA API calls to work properly, we must defer to what
1558          * the DT says about coherency, regardless of what the hardware claims.
1559          * Fortunately, this also opens up a workaround for systems where the
1560          * ID register value has ended up configured incorrectly.
1561          */
1562         cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1563         cttw_reg = !!(id & ID0_CTTW);
1564         if (cttw_dt)
1565                 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1566         if (cttw_dt || cttw_reg)
1567                 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1568                            cttw_dt ? "" : "non-");
1569         if (cttw_dt != cttw_reg)
1570                 dev_notice(smmu->dev,
1571                            "\t(IDR0.CTTW overridden by dma-coherent property)\n");
1572
1573         if (id & ID0_SMS) {
1574                 u32 smr, sid, mask;
1575
1576                 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1577                 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1578                                            ID0_NUMSMRG_MASK;
1579                 if (smmu->num_mapping_groups == 0) {
1580                         dev_err(smmu->dev,
1581                                 "stream-matching supported, but no SMRs present!\n");
1582                         return -ENODEV;
1583                 }
1584
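                     /*
                      * Probe how many SMR ID and mask bits are actually
                      * implemented: write all-ones to both fields of SMR0
                      * and read back which bits stick.
                      */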
1585                 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1586                 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1587                 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1588                 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1589
1590                 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1591                 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1592                 if ((mask & sid) != sid) {
1593                         dev_err(smmu->dev,
1594                                 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1595                                 mask, sid);
1596                         return -ENODEV;
1597                 }
1598
1599                 dev_notice(smmu->dev,
1600                            "\tstream matching with %u register groups, mask 0x%x\n",
1601                            smmu->num_mapping_groups, mask);
1602         } else {
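                     /* Stream indexing: incoming stream IDs index S2CRn directly */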
1603                 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1604                                            ID0_NUMSIDB_MASK;
1605         }
1606
1607         /* ID1 */
1608         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
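             /* ID1_PAGESIZE selects between 4K and 64K SMMU register pages */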
1609         smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
1610
1611         /* Check for size mismatch of SMMU address space from mapped region */
1612         size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1613         size *= 2 << smmu->pgshift;
1614         if (smmu->size != size)
1615                 dev_warn(smmu->dev,
1616                         "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1617                         size, smmu->size);
1618
1619         smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
1620         smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1621         if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1622                 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1623                 return -ENODEV;
1624         }
1625         dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1626                    smmu->num_context_banks, smmu->num_s2_context_banks);
1627
1628         /* ID2 */
1629         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1630         size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
1631         smmu->ipa_size = size;
1632
1633         /* The output mask is also applied for bypass */
1634         size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
1635         smmu->pa_size = size;
1636
1637         /*
1638          * What the page table walker can address actually depends on which
1639          * descriptor format is in use, but since a) we don't know that yet,
1640          * and b) it can vary per context bank, this will have to do...
1641          */
1642         if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1643                 dev_warn(smmu->dev,
1644                          "failed to set DMA mask for table walker\n");
1645
1646         if (smmu->version == ARM_SMMU_V1) {
1647                 smmu->va_size = smmu->ipa_size;
1648                 size = SZ_4K | SZ_2M | SZ_1G;
1649         } else {
1650                 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
1651                 smmu->va_size = arm_smmu_id_size_to_bits(size);
1652 #ifndef CONFIG_64BIT
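                     /* A 32-bit kernel cannot issue VAs wider than 32 bits */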
1653                 smmu->va_size = min(32UL, smmu->va_size);
1654 #endif
1655                 size = 0;
1656                 if (id & ID2_PTFS_4K)
1657                         size |= SZ_4K | SZ_2M | SZ_1G;
1658                 if (id & ID2_PTFS_16K)
1659                         size |= SZ_16K | SZ_32M;
1660                 if (id & ID2_PTFS_64K)
1661                         size |= SZ_64K | SZ_512M;
1662         }
1663
1664         arm_smmu_ops.pgsize_bitmap &= size;
1665         dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1666
1667         if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1668                 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1669                            smmu->va_size, smmu->ipa_size);
1670
1671         if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1672                 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1673                            smmu->ipa_size, smmu->pa_size);
1674
1675         return 0;
1676 }
1677
1678 static const struct of_device_id arm_smmu_of_match[] = {
1679         { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
1680         { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
1681         { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
1682         { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
1683         { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
1684         { },
1685 };
1686 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
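     /*
      * An illustrative (hypothetical) device-tree fragment consumed by the
      * probe path below; the node names and stream IDs are made up:
      *
      *	smmu@ba5e0000 {
      *		compatible = "arm,smmu-v1";
      *		reg = <0xba5e0000 0x10000>;
      *		#global-interrupts = <2>;
      *		interrupts = <0 32 4>, <0 33 4>, <0 34 4>;
      *		mmu-masters = <&dma0 0xd01d 0xd01e>;
      *	};
      *
      * where the dma0 master node carries #stream-id-cells = <2>.
      */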
1687
1688 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1689 {
1690         const struct of_device_id *of_id;
1691         struct resource *res;
1692         struct arm_smmu_device *smmu;
1693         struct device *dev = &pdev->dev;
1694         struct rb_node *node;
1695         struct of_phandle_args masterspec;
1696         int num_irqs, i, err;
1697
1698         smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1699         if (!smmu) {
1700                 dev_err(dev, "failed to allocate arm_smmu_device\n");
1701                 return -ENOMEM;
1702         }
1703         smmu->dev = dev;
1704
1705         of_id = of_match_node(arm_smmu_of_match, dev->of_node);
1706         smmu->version = (enum arm_smmu_arch_version)of_id->data;
1707
1708         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1709         smmu->base = devm_ioremap_resource(dev, res);
1710         if (IS_ERR(smmu->base))
1711                 return PTR_ERR(smmu->base);
1712         smmu->size = resource_size(res);
1713
1714         if (of_property_read_u32(dev->of_node, "#global-interrupts",
1715                                  &smmu->num_global_irqs)) {
1716                 dev_err(dev, "missing #global-interrupts property\n");
1717                 return -ENODEV;
1718         }
1719
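             /*
              * Count the IRQ resources: the first #global-interrupts of them
              * are global fault interrupts, and every one beyond that is a
              * context-bank interrupt.
              */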
1720         num_irqs = 0;
1721         while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1722                 num_irqs++;
1723                 if (num_irqs > smmu->num_global_irqs)
1724                         smmu->num_context_irqs++;
1725         }
1726
1727         if (!smmu->num_context_irqs) {
1728                 dev_err(dev, "found %d interrupts but expected at least %d\n",
1729                         num_irqs, smmu->num_global_irqs + 1);
1730                 return -ENODEV;
1731         }
1732
1733         smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
1734                                   GFP_KERNEL);
1735         if (!smmu->irqs) {
1736                 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1737                 return -ENOMEM;
1738         }
1739
1740         for (i = 0; i < num_irqs; ++i) {
1741                 int irq = platform_get_irq(pdev, i);
1742
1743                 if (irq < 0) {
1744                         dev_err(dev, "failed to get irq index %d\n", i);
1745                         return -ENODEV;
1746                 }
1747                 smmu->irqs[i] = irq;
1748         }
1749
1750         err = arm_smmu_device_cfg_probe(smmu);
1751         if (err)
1752                 return err;
1753
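             /*
              * Walk the legacy "mmu-masters" list: each entry is a phandle to
              * a master's device node followed by its stream IDs, the count
              * of which is given by that node's #stream-id-cells property.
              */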
1754         i = 0;
1755         smmu->masters = RB_ROOT;
1756         while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
1757                                            "#stream-id-cells", i,
1758                                            &masterspec)) {
1759                 err = register_smmu_master(smmu, dev, &masterspec);
1760                 if (err) {
1761                         dev_err(dev, "failed to add master %s\n",
1762                                 masterspec.np->name);
1763                         goto out_put_masters;
1764                 }
1765
1766                 i++;
1767         }
1768         dev_notice(dev, "registered %d master devices\n", i);
1769
1770         parse_driver_options(smmu);
1771
1772         if (smmu->version > ARM_SMMU_V1 &&
1773             smmu->num_context_banks != smmu->num_context_irqs) {
1774                 dev_err(dev,
1775                         "found only %d context interrupt(s) but %d required\n",
1776                         smmu->num_context_irqs, smmu->num_context_banks);
1777                 err = -ENODEV;
1778                 goto out_put_masters;
1779         }
1780
1781         for (i = 0; i < smmu->num_global_irqs; ++i) {
1782                 err = request_irq(smmu->irqs[i],
1783                                   arm_smmu_global_fault,
1784                                   IRQF_SHARED,
1785                                   "arm-smmu global fault",
1786                                   smmu);
1787                 if (err) {
1788                         dev_err(dev, "failed to request global IRQ %d (%u)\n",
1789                                 i, smmu->irqs[i]);
1790                         goto out_free_irqs;
1791                 }
1792         }
1793
1794         INIT_LIST_HEAD(&smmu->list);
1795         spin_lock(&arm_smmu_devices_lock);
1796         list_add(&smmu->list, &arm_smmu_devices);
1797         spin_unlock(&arm_smmu_devices_lock);
1798
1799         arm_smmu_device_reset(smmu);
1800         return 0;
1801
1802 out_free_irqs:
1803         while (i--)
1804                 free_irq(smmu->irqs[i], smmu);
1805
1806 out_put_masters:
1807         for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
1808                 struct arm_smmu_master *master
1809                         = container_of(node, struct arm_smmu_master, node);
1810                 of_node_put(master->of_node);
1811         }
1812
1813         return err;
1814 }
1815
1816 static int arm_smmu_device_remove(struct platform_device *pdev)
1817 {
1818         int i;
1819         struct device *dev = &pdev->dev;
1820         struct arm_smmu_device *curr, *smmu = NULL;
1821         struct rb_node *node;
1822
1823         spin_lock(&arm_smmu_devices_lock);
1824         list_for_each_entry(curr, &arm_smmu_devices, list) {
1825                 if (curr->dev == dev) {
1826                         smmu = curr;
1827                         list_del(&smmu->list);
1828                         break;
1829                 }
1830         }
1831         spin_unlock(&arm_smmu_devices_lock);
1832
1833         if (!smmu)
1834                 return -ENODEV;
1835
1836         for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
1837                 struct arm_smmu_master *master
1838                         = container_of(node, struct arm_smmu_master, node);
1839                 of_node_put(master->of_node);
1840         }
1841
1842         if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
1843                 dev_err(dev, "removing device with active domains!\n");
1844
1845         for (i = 0; i < smmu->num_global_irqs; ++i)
1846                 free_irq(smmu->irqs[i], smmu);
1847
1848         /* Turn the thing off: CLIENTPD makes all client transactions bypass the SMMU */
1849         writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1850         return 0;
1851 }
1852
1853 static struct platform_driver arm_smmu_driver = {
1854         .driver = {
1855                 .name           = "arm-smmu",
1856                 .of_match_table = of_match_ptr(arm_smmu_of_match),
1857         },
1858         .probe  = arm_smmu_device_dt_probe,
1859         .remove = arm_smmu_device_remove,
1860 };
1861
1862 static int __init arm_smmu_init(void)
1863 {
1864         struct device_node *np;
1865         int ret;
1866
1867         /*
1868          * Play nice with systems that don't have an ARM SMMU by checking that
1869          * an ARM SMMU exists in the system before proceeding with the driver
1870          * and IOMMU bus operation registration.
1871          */
1872         np = of_find_matching_node(NULL, arm_smmu_of_match);
1873         if (!np)
1874                 return 0;
1875
1876         of_node_put(np);
1877
1878         ret = platform_driver_register(&arm_smmu_driver);
1879         if (ret)
1880                 return ret;
1881
1882         /* Oh, for a proper bus abstraction */
1883         if (!iommu_present(&platform_bus_type))
1884                 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1885
1886 #ifdef CONFIG_ARM_AMBA
1887         if (!iommu_present(&amba_bustype))
1888                 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1889 #endif
1890
1891 #ifdef CONFIG_PCI
1892         if (!iommu_present(&pci_bus_type))
1893                 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
1894 #endif
1895
1896         return 0;
1897 }
1898
1899 static void __exit arm_smmu_exit(void)
1900 {
1901         platform_driver_unregister(&arm_smmu_driver);
1902 }
1903
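     /*
      * Register at subsys_initcall time so that the SMMU is probed, and the
      * bus iommu_ops above are in place, before the master devices probe.
      */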
1904 subsys_initcall(arm_smmu_init);
1905 module_exit(arm_smmu_exit);
1906
1907 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
1908 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
1909 MODULE_LICENSE("GPL v2");