/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *      - SMMUv1 and v2 implementations
 *      - Stream-matching and stream-indexing
 *      - v7/v8 long-descriptor format
 *      - Non-secure access to the SMMU
 *      - Context fault reporting
 *      - Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS                128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)              ((smmu)->base)
#define ARM_SMMU_GR1(smmu)              ((smmu)->base + (1 << (smmu)->pgshift))
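/*
 * GR0 is the first (1 << pgshift)-byte page of the SMMU's register space
 * and GR1 the second; pgshift itself is probed from ID1 at device init.
 */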

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)                                           \
        ((smmu)->base +                                                 \
                ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)       \
                        ? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq            writeq_relaxed
#else
#define smmu_write_atomic_lq            writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0               0x0
#define sCR0_CLIENTPD                   (1 << 0)
#define sCR0_GFRE                       (1 << 1)
#define sCR0_GFIE                       (1 << 2)
#define sCR0_EXIDENABLE                 (1 << 3)
#define sCR0_GCFGFRE                    (1 << 4)
#define sCR0_GCFGFIE                    (1 << 5)
#define sCR0_USFCFG                     (1 << 10)
#define sCR0_VMIDPNE                    (1 << 11)
#define sCR0_PTM                        (1 << 12)
#define sCR0_FB                         (1 << 13)
#define sCR0_VMID16EN                   (1 << 31)
#define sCR0_BSU_SHIFT                  14
#define sCR0_BSU_MASK                   0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR               0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0                0x20
#define ARM_SMMU_GR0_ID1                0x24
#define ARM_SMMU_GR0_ID2                0x28
#define ARM_SMMU_GR0_ID3                0x2c
#define ARM_SMMU_GR0_ID4                0x30
#define ARM_SMMU_GR0_ID5                0x34
#define ARM_SMMU_GR0_ID6                0x38
#define ARM_SMMU_GR0_ID7                0x3c
#define ARM_SMMU_GR0_sGFSR              0x48
#define ARM_SMMU_GR0_sGFSYNR0           0x50
#define ARM_SMMU_GR0_sGFSYNR1           0x54
#define ARM_SMMU_GR0_sGFSYNR2           0x58

#define ID0_S1TS                        (1 << 30)
#define ID0_S2TS                        (1 << 29)
#define ID0_NTS                         (1 << 28)
#define ID0_SMS                         (1 << 27)
#define ID0_ATOSNS                      (1 << 26)
#define ID0_PTFS_NO_AARCH32             (1 << 25)
#define ID0_PTFS_NO_AARCH32S            (1 << 24)
#define ID0_CTTW                        (1 << 14)
#define ID0_NUMIRPT_SHIFT               16
#define ID0_NUMIRPT_MASK                0xff
#define ID0_NUMSIDB_SHIFT               9
#define ID0_NUMSIDB_MASK                0xf
#define ID0_EXIDS                       (1 << 8)
#define ID0_NUMSMRG_SHIFT               0
#define ID0_NUMSMRG_MASK                0xff

#define ID1_PAGESIZE                    (1 << 31)
#define ID1_NUMPAGENDXB_SHIFT           28
#define ID1_NUMPAGENDXB_MASK            7
#define ID1_NUMS2CB_SHIFT               16
#define ID1_NUMS2CB_MASK                0xff
#define ID1_NUMCB_SHIFT                 0
#define ID1_NUMCB_MASK                  0xff

#define ID2_OAS_SHIFT                   4
#define ID2_OAS_MASK                    0xf
#define ID2_IAS_SHIFT                   0
#define ID2_IAS_MASK                    0xf
#define ID2_UBS_SHIFT                   8
#define ID2_UBS_MASK                    0xf
#define ID2_PTFS_4K                     (1 << 12)
#define ID2_PTFS_16K                    (1 << 13)
#define ID2_PTFS_64K                    (1 << 14)
#define ID2_VMID16                      (1 << 15)

#define ID7_MAJOR_SHIFT                 4
#define ID7_MAJOR_MASK                  0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID           0x64
#define ARM_SMMU_GR0_TLBIALLNSNH        0x68
#define ARM_SMMU_GR0_TLBIALLH           0x6c
#define ARM_SMMU_GR0_sTLBGSYNC          0x70
#define ARM_SMMU_GR0_sTLBGSTATUS        0x74
#define sTLBGSTATUS_GSACTIVE            (1 << 0)
#define TLB_LOOP_TIMEOUT                1000000 /* 1s! */
#define TLB_SPIN_COUNT                  10

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)             (0x800 + ((n) << 2))
#define SMR_VALID                       (1 << 31)
#define SMR_MASK_SHIFT                  16
#define SMR_ID_SHIFT                    0

#define ARM_SMMU_GR0_S2CR(n)            (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT                0
#define S2CR_CBNDX_MASK                 0xff
#define S2CR_EXIDVALID                  (1 << 10)
#define S2CR_TYPE_SHIFT                 16
#define S2CR_TYPE_MASK                  0x3
enum arm_smmu_s2cr_type {
        S2CR_TYPE_TRANS,
        S2CR_TYPE_BYPASS,
        S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT              24
#define S2CR_PRIVCFG_MASK               0x3
enum arm_smmu_s2cr_privcfg {
        S2CR_PRIVCFG_DEFAULT,
        S2CR_PRIVCFG_DIPAN,
        S2CR_PRIVCFG_UNPRIV,
        S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)            (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT                 0
#define CBAR_VMID_MASK                  0xff
#define CBAR_S1_BPSHCFG_SHIFT           8
#define CBAR_S1_BPSHCFG_MASK            3
#define CBAR_S1_BPSHCFG_NSH             3
#define CBAR_S1_MEMATTR_SHIFT           12
#define CBAR_S1_MEMATTR_MASK            0xf
#define CBAR_S1_MEMATTR_WB              0xf
#define CBAR_TYPE_SHIFT                 16
#define CBAR_TYPE_MASK                  0x3
#define CBAR_TYPE_S2_TRANS              (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS    (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT     (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS     (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT              24
#define CBAR_IRPTNDX_MASK               0xff

#define ARM_SMMU_GR1_CBA2R(n)           (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT                (0 << 0)
#define CBA2R_RW64_64BIT                (1 << 0)
#define CBA2R_VMID_SHIFT                16
#define CBA2R_VMID_MASK                 0xffff

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)    ((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR               0x0
#define ARM_SMMU_CB_ACTLR               0x4
#define ARM_SMMU_CB_RESUME              0x8
#define ARM_SMMU_CB_TTBCR2              0x10
#define ARM_SMMU_CB_TTBR0               0x20
#define ARM_SMMU_CB_TTBR1               0x28
#define ARM_SMMU_CB_TTBCR               0x30
#define ARM_SMMU_CB_CONTEXTIDR          0x34
#define ARM_SMMU_CB_S1_MAIR0            0x38
#define ARM_SMMU_CB_S1_MAIR1            0x3c
#define ARM_SMMU_CB_PAR                 0x50
#define ARM_SMMU_CB_FSR                 0x58
#define ARM_SMMU_CB_FAR                 0x60
#define ARM_SMMU_CB_FSYNR0              0x68
#define ARM_SMMU_CB_S1_TLBIVA           0x600
#define ARM_SMMU_CB_S1_TLBIASID         0x610
#define ARM_SMMU_CB_S1_TLBIVAL          0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2        0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L       0x638
#define ARM_SMMU_CB_TLBSYNC             0x7f0
#define ARM_SMMU_CB_TLBSTATUS           0x7f4
#define ARM_SMMU_CB_ATS1PR              0x800
#define ARM_SMMU_CB_ATSR                0x8f0

#define SCTLR_S1_ASIDPNE                (1 << 12)
#define SCTLR_CFCFG                     (1 << 7)
#define SCTLR_CFIE                      (1 << 6)
#define SCTLR_CFRE                      (1 << 5)
#define SCTLR_E                         (1 << 4)
#define SCTLR_AFE                       (1 << 2)
#define SCTLR_TRE                       (1 << 1)
#define SCTLR_M                         (1 << 0)

#define ARM_MMU500_ACTLR_CPRE           (1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK       (1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN     (1 << 8)

#define CB_PAR_F                        (1 << 0)

#define ATSR_ACTIVE                     (1 << 0)

#define RESUME_RETRY                    (0 << 0)
#define RESUME_TERMINATE                (1 << 0)

#define TTBCR2_SEP_SHIFT                15
#define TTBCR2_SEP_UPSTREAM             (0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS                       (1 << 4)

#define TTBRn_ASID_SHIFT                48

#define FSR_MULTI                       (1 << 31)
#define FSR_SS                          (1 << 30)
#define FSR_UUT                         (1 << 8)
#define FSR_ASF                         (1 << 7)
#define FSR_TLBLKF                      (1 << 6)
#define FSR_TLBMCF                      (1 << 5)
#define FSR_EF                          (1 << 4)
#define FSR_PF                          (1 << 3)
#define FSR_AFF                         (1 << 2)
#define FSR_TF                          (1 << 1)

#define FSR_IGN                         (FSR_AFF | FSR_ASF | \
                                         FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT                       (FSR_MULTI | FSR_SS | FSR_UUT | \
                                         FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR                      (1 << 4)

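/*
 * Fixed IOVA window reserved for mapping MSI doorbells, advertised to
 * the core IOMMU code as a software-managed MSI reservation region.
 */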
#define MSI_IOVA_BASE                   0x8000000
#define MSI_IOVA_LENGTH                 0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
        "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
        "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
        ARM_SMMU_V1,
        ARM_SMMU_V1_64K,
        ARM_SMMU_V2,
};

enum arm_smmu_implementation {
        GENERIC_SMMU,
        ARM_MMU500,
        CAVIUM_SMMUV2,
};

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
#define ACPI_IORT_SMMU_CORELINK_MMU401  0x4
#endif
#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
#define ACPI_IORT_SMMU_CAVIUM_THUNDERX  0x5
#endif

struct arm_smmu_s2cr {
        struct iommu_group              *group;
        int                             count;
        enum arm_smmu_s2cr_type         type;
        enum arm_smmu_s2cr_privcfg      privcfg;
        u8                              cbndx;
};

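/*
 * Default value for an S2CR: a compound literal whose type field honours
 * the disable_bypass parameter, so unclaimed streams either bypass the
 * SMMU or fault.
 */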
#define s2cr_init_val (struct arm_smmu_s2cr){                           \
        .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,    \
}

struct arm_smmu_smr {
        u16                             mask;
        u16                             id;
        bool                            valid;
};

struct arm_smmu_master_cfg {
        struct arm_smmu_device          *smmu;
        s16                             smendx[];
};
#define INVALID_SMENDX                  -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
        (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
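/*
 * In the loop below, the comma expression refreshes idx on every
 * iteration while i < fw->num_ids remains the termination condition;
 * fwspec_smendx()'s bounds check keeps the final evaluation from
 * overrunning smendx[].
 */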
#define for_each_cfg_sme(fw, i, idx) \
        for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
        struct device                   *dev;

        void __iomem                    *base;
        void __iomem                    *cb_base;
        unsigned long                   pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK     (1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH      (1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1          (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2          (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED      (1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS         (1 << 5)
#define ARM_SMMU_FEAT_VMID16            (1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K    (1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K   (1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K   (1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L     (1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S     (1 << 11)
#define ARM_SMMU_FEAT_EXIDS             (1 << 12)
        u32                             features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
        u32                             options;
        enum arm_smmu_arch_version      version;
        enum arm_smmu_implementation    model;

        u32                             num_context_banks;
        u32                             num_s2_context_banks;
        DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
        atomic_t                        irptndx;

        u32                             num_mapping_groups;
        u16                             streamid_mask;
        u16                             smr_mask_mask;
        struct arm_smmu_smr             *smrs;
        struct arm_smmu_s2cr            *s2crs;
        struct mutex                    stream_map_mutex;

        unsigned long                   va_size;
        unsigned long                   ipa_size;
        unsigned long                   pa_size;
        unsigned long                   pgsize_bitmap;

        u32                             num_global_irqs;
        u32                             num_context_irqs;
        unsigned int                    *irqs;

        u32                             cavium_id_base; /* Specific to Cavium */

        spinlock_t                      global_sync_lock;

        /* IOMMU core code handle */
        struct iommu_device             iommu;
};

enum arm_smmu_context_fmt {
        ARM_SMMU_CTX_FMT_NONE,
        ARM_SMMU_CTX_FMT_AARCH64,
        ARM_SMMU_CTX_FMT_AARCH32_L,
        ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
        u8                              cbndx;
        u8                              irptndx;
        union {
                u16                     asid;
                u16                     vmid;
        };
        u32                             cbar;
        enum arm_smmu_context_fmt       fmt;
};
#define INVALID_IRPTNDX                 0xff

enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
        ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
        struct arm_smmu_device          *smmu;
        struct io_pgtable_ops           *pgtbl_ops;
        struct arm_smmu_cfg             cfg;
        enum arm_smmu_domain_stage      stage;
        struct mutex                    init_mutex; /* Protects smmu pointer */
        spinlock_t                      cb_lock; /* Serialises ATS1* ops and TLB syncs */
        struct iommu_domain             domain;
};

struct arm_smmu_option_prop {
        u32 opt;
        const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
        { 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
        int i = 0;

        do {
                if (of_property_read_bool(smmu->dev->of_node,
                                                arm_smmu_options[i].prop)) {
                        smmu->options |= arm_smmu_options[i].opt;
                        dev_notice(smmu->dev, "option %s\n",
                                arm_smmu_options[i].prop);
                }
        } while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;

                while (!pci_is_root_bus(bus))
                        bus = bus->parent;
                return of_node_get(bus->bridge->parent->of_node);
        }

        return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
        *((__be32 *)data) = cpu_to_be32(alias);
        return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
        struct of_phandle_iterator *it = *(void **)data;
        struct device_node *np = it->node;
        int err;

        of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
                            "#stream-id-cells", 0)
                if (it->node == np) {
                        *(void **)data = dev;
                        return 1;
                }
        it->node = np;
        return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

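/*
 * With the legacy "mmu-masters" DT binding, the SMMU node lists its
 * masters and their stream IDs rather than each master referencing the
 * SMMU. Walk every probed SMMU looking for a phandle back to this
 * device, then translate any match into a modern iommu_fwspec.
 */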
static int arm_smmu_register_legacy_master(struct device *dev,
                                           struct arm_smmu_device **smmu)
{
        struct device *smmu_dev;
        struct device_node *np;
        struct of_phandle_iterator it;
        void *data = &it;
        u32 *sids;
        __be32 pci_sid;
        int err;

        np = dev_get_dev_node(dev);
        if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
                of_node_put(np);
                return -ENODEV;
        }

        it.node = np;
        err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
                                     __find_legacy_master_phandle);
        smmu_dev = data;
        of_node_put(np);
        if (err == 0)
                return -ENODEV;
        if (err < 0)
                return err;

        if (dev_is_pci(dev)) {
                /* "mmu-masters" assumes Stream ID == Requester ID */
                pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
                                       &pci_sid);
                it.cur = &pci_sid;
                it.cur_count = 1;
        }

        err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
                                &arm_smmu_ops);
        if (err)
                return err;

        sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
        if (!sids)
                return -ENOMEM;

        *smmu = dev_get_drvdata(smmu_dev);
        of_phandle_iterator_args(&it, sids, it.cur_count);
        err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
        kfree(sids);
        return err;
}

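/*
 * Lock-free bitmap allocator: find_next_zero_bit() nominates a
 * candidate and test_and_set_bit() claims it atomically, retrying if
 * another CPU got there first.
 */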
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
                                void __iomem *sync, void __iomem *status)
{
        unsigned int spin_cnt, delay;

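        /*
         * Writing the sync register kicks off the invalidation sync;
         * busy-poll the status register briefly, then back off with an
         * exponentially growing udelay() up to roughly a second in total.
         */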
        writel_relaxed(0, sync);
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
                udelay(delay);
        }
        dev_err_ratelimited(smmu->dev,
                            "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
        void __iomem *base = ARM_SMMU_GR0(smmu);
        unsigned long flags;

        spin_lock_irqsave(&smmu->global_sync_lock, flags);
        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
                            base + ARM_SMMU_GR0_sTLBGSTATUS);
        spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
        unsigned long flags;

        spin_lock_irqsave(&smmu_domain->cb_lock, flags);
        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
                            base + ARM_SMMU_CB_TLBSTATUS);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;

        arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

        writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
        arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *base = ARM_SMMU_GR0(smmu);

        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
        arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                                          size_t granule, bool leaf, void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

        if (stage1) {
                reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

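                /*
                 * TLBIVA(L) takes the VA with the ASID folded into the
                 * low bits for the AArch32 formats, or VA >> 12 with the
                 * ASID in bits [63:48] for AArch64. Note that
                 * "iova &= ~12UL" below clears only bits 3:2 rather than
                 * the full page offset; this appears harmless in practice
                 * since callers pass granule-aligned addresses whose low
                 * bits are already zero.
                 */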
                if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
                        iova &= ~12UL;
                        iova |= cfg->asid;
                        do {
                                writel_relaxed(iova, reg);
                                iova += granule;
                        } while (size -= granule);
                } else {
                        iova >>= 12;
                        iova |= (u64)cfg->asid << 48;
                        do {
                                writeq_relaxed(iova, reg);
                                iova += granule >> 12;
                        } while (size -= granule);
                }
        } else {
                reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
                              ARM_SMMU_CB_S2_TLBIIPAS2;
                iova >>= 12;
                do {
                        smmu_write_atomic_lq(iova, reg);
                        iova += granule >> 12;
                } while (size -= granule);
        }
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
                                         size_t granule, bool leaf, void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_vmid_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
        u32 fsr, fsynr;
        unsigned long iova;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base;

        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
        fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

        if (!(fsr & FSR_FAULT))
                return IRQ_NONE;

        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
        iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

        dev_err_ratelimited(smmu->dev,
        "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
                            fsr, iova, fsynr, cfg->cbndx);

        writel(fsr, cb_base + ARM_SMMU_CB_FSR);
        return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
        u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
        struct arm_smmu_device *smmu = dev;
        void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

        gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
        gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
        gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
        gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

        if (!gfsr)
                return IRQ_NONE;

        dev_err_ratelimited(smmu->dev,
                "Unexpected global fault, this could be serious\n");
        dev_err_ratelimited(smmu->dev,
                "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
                gfsr, gfsynr0, gfsynr1, gfsynr2);

        writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
        return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                                       struct io_pgtable_cfg *pgtbl_cfg)
{
        u32 reg, reg2;
        u64 reg64;
        bool stage1;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base, *gr1_base;

        gr1_base = ARM_SMMU_GR1(smmu);
        stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

        if (smmu->version > ARM_SMMU_V1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
                        reg = CBA2R_RW64_64BIT;
                else
                        reg = CBA2R_RW64_32BIT;
                /* 16-bit VMIDs live in CBA2R */
                if (smmu->features & ARM_SMMU_FEAT_VMID16)
                        reg |= cfg->vmid << CBA2R_VMID_SHIFT;

                writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
        }

        /* CBAR */
        reg = cfg->cbar;
        if (smmu->version < ARM_SMMU_V2)
                reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

        /*
         * Use the weakest shareability/memory types, so they are
         * overridden by the ttbcr/pte.
         */
        if (stage1) {
                reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
                        (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
        } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
                /* 8-bit VMIDs live in CBAR */
                reg |= cfg->vmid << CBAR_VMID_SHIFT;
        }
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

        /*
         * TTBCR
         * We must write this before the TTBRs, since it determines the
         * access behaviour of some fields (in particular, ASID[15:8]).
         */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.tcr;
                        reg2 = 0;
                } else {
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
                        reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
                        reg2 |= TTBCR2_SEP_UPSTREAM;
                        if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
                                reg2 |= TTBCR2_AS;
                }
                if (smmu->version > ARM_SMMU_V1)
                        writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
        } else {
                reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
        }
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

        /* TTBRs */
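        /*
         * For the LPAE formats the ASID travels in TTBRn[63:48]
         * (TTBCR2.AS above selects 16-bit ASIDs when the context is
         * AArch64); the short-descriptor format carries its ASID in
         * CONTEXTIDR instead.
         */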
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
                        writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
                } else {
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
                        reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
                        reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
                }
        } else {
                reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
                writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
        }

        /* MAIRs (stage-1 only) */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.prrr;
                        reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
                } else {
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
                        reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
                }
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
                writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
        }

        /* SCTLR */
        reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
        if (stage1)
                reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
        reg |= SCTLR_E;
#endif
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                        struct arm_smmu_device *smmu)
{
        int irq, start, ret = 0;
        unsigned long ias, oas;
        struct io_pgtable_ops *pgtbl_ops;
        struct io_pgtable_cfg pgtbl_cfg;
        enum io_pgtable_fmt fmt;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        const struct iommu_gather_ops *tlb_ops;

        mutex_lock(&smmu_domain->init_mutex);
        if (smmu_domain->smmu)
                goto out_unlock;

        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
                smmu_domain->smmu = smmu;
                goto out_unlock;
        }

        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without
         * support for nested translation. That means we end up with the
         * following table:
         *
         * Requested        Supported        Actual
         *     S1               N              S1
         *     S1             S1+S2            S1
         *     S1               S2             S2
         *     S1               S1             S1
         *     N                N              N
         *     N              S1+S2            S2
         *     N                S2             S2
         *     N                S1             S1
         *
         * Note that you can't actually request stage-2 mappings.
         */
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

        /*
         * Choosing a suitable context format is even more fiddly. Until we
         * grow some way for the caller to express a preference, and/or move
         * the decision into the io-pgtable code where it arguably belongs,
         * just aim for the closest thing to the rest of the system, and hope
         * that the hardware isn't esoteric enough that we can't assume AArch64
         * support to be a superset of AArch32 support...
         */
        if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
                cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
        if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
            !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
            (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
            (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
                cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
        if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
            (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
                               ARM_SMMU_FEAT_FMT_AARCH64_16K |
                               ARM_SMMU_FEAT_FMT_AARCH64_4K)))
                cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

        if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
                ret = -EINVAL;
                goto out_unlock;
        }

        switch (smmu_domain->stage) {
        case ARM_SMMU_DOMAIN_S1:
                cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
                start = smmu->num_s2_context_banks;
                ias = smmu->va_size;
                oas = smmu->ipa_size;
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
                        fmt = ARM_64_LPAE_S1;
                } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
                        fmt = ARM_32_LPAE_S1;
                        ias = min(ias, 32UL);
                        oas = min(oas, 40UL);
                } else {
                        fmt = ARM_V7S;
                        ias = min(ias, 32UL);
                        oas = min(oas, 32UL);
                }
                tlb_ops = &arm_smmu_s1_tlb_ops;
                break;
        case ARM_SMMU_DOMAIN_NESTED:
                /*
                 * We will likely want to change this if/when KVM gets
                 * involved.
                 */
        case ARM_SMMU_DOMAIN_S2:
                cfg->cbar = CBAR_TYPE_S2_TRANS;
                start = 0;
                ias = smmu->ipa_size;
                oas = smmu->pa_size;
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
                        fmt = ARM_64_LPAE_S2;
                } else {
                        fmt = ARM_32_LPAE_S2;
                        ias = min(ias, 40UL);
                        oas = min(oas, 40UL);
                }
                if (smmu->version == ARM_SMMU_V2)
                        tlb_ops = &arm_smmu_s2_tlb_ops_v2;
                else
                        tlb_ops = &arm_smmu_s2_tlb_ops_v1;
                break;
        default:
                ret = -EINVAL;
                goto out_unlock;
        }
        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (ret < 0)
                goto out_unlock;

        cfg->cbndx = ret;
        if (smmu->version < ARM_SMMU_V2) {
                cfg->irptndx = atomic_inc_return(&smmu->irptndx);
                cfg->irptndx %= smmu->num_context_irqs;
        } else {
                cfg->irptndx = cfg->cbndx;
        }

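        /*
         * Cavium CN88xx (erratum #27704) has two SMMUs sharing a single
         * (V)ASID namespace, so cavium_id_base offsets each SMMU's
         * contexts into a distinct range; it is zero everywhere else.
         */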
1016         if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
1017                 cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
1018         else
1019                 cfg->asid = cfg->cbndx + smmu->cavium_id_base;
1020
1021         pgtbl_cfg = (struct io_pgtable_cfg) {
1022                 .pgsize_bitmap  = smmu->pgsize_bitmap,
1023                 .ias            = ias,
1024                 .oas            = oas,
1025                 .tlb            = tlb_ops,
1026                 .iommu_dev      = smmu->dev,
1027         };
1028
1029         if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
1030                 pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
1031
1032         smmu_domain->smmu = smmu;
1033         pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
1034         if (!pgtbl_ops) {
1035                 ret = -ENOMEM;
1036                 goto out_clear_smmu;
1037         }
1038
1039         /* Update the domain's page sizes to reflect the page table format */
1040         domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1041         domain->geometry.aperture_end = (1UL << ias) - 1;
1042         domain->geometry.force_aperture = true;
1043
1044         /* Initialise the context bank with our page table cfg */
1045         arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
1046
1047         /*
1048          * Request context fault interrupt. Do this last to avoid the
1049          * handler seeing a half-initialised domain state.
1050          */
1051         irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1052         ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
1053                                IRQF_SHARED, "arm-smmu-context-fault", domain);
1054         if (ret < 0) {
1055                 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1056                         cfg->irptndx, irq);
1057                 cfg->irptndx = INVALID_IRPTNDX;
1058         }
1059
1060         mutex_unlock(&smmu_domain->init_mutex);
1061
1062         /* Publish page table ops for map/unmap */
1063         smmu_domain->pgtbl_ops = pgtbl_ops;
1064         return 0;
1065
1066 out_clear_smmu:
1067         smmu_domain->smmu = NULL;
1068 out_unlock:
1069         mutex_unlock(&smmu_domain->init_mutex);
1070         return ret;
1071 }
1072
1073 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1074 {
1075         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1076         struct arm_smmu_device *smmu = smmu_domain->smmu;
1077         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1078         void __iomem *cb_base;
1079         int irq;
1080
1081         if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
1082                 return;
1083
1084         /*
1085          * Disable the context bank and free the page tables before freeing
1086          * it.
1087          */
1088         cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
1089         writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1090
1091         if (cfg->irptndx != INVALID_IRPTNDX) {
1092                 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1093                 devm_free_irq(smmu->dev, irq, domain);
1094         }
1095
1096         free_io_pgtable_ops(smmu_domain->pgtbl_ops);
1097         __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
1098 }
1099
1100 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1101 {
1102         struct arm_smmu_domain *smmu_domain;
1103
1104         if (type != IOMMU_DOMAIN_UNMANAGED &&
1105             type != IOMMU_DOMAIN_DMA &&
1106             type != IOMMU_DOMAIN_IDENTITY)
1107                 return NULL;
1108         /*
1109          * Allocate the domain and initialise some of its data structures.
1110          * We can't really do anything meaningful until we've added a
1111          * master.
1112          */
1113         smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1114         if (!smmu_domain)
1115                 return NULL;
1116
1117         if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1118             iommu_get_dma_cookie(&smmu_domain->domain))) {
1119                 kfree(smmu_domain);
1120                 return NULL;
1121         }
1122
1123         mutex_init(&smmu_domain->init_mutex);
1124         spin_lock_init(&smmu_domain->cb_lock);
1125
1126         return &smmu_domain->domain;
1127 }
1128
1129 static void arm_smmu_domain_free(struct iommu_domain *domain)
1130 {
1131         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1132
1133         /*
1134          * Free the domain resources. We assume that all devices have
1135          * already been detached.
1136          */
1137         iommu_put_dma_cookie(domain);
1138         arm_smmu_destroy_domain_context(domain);
1139         kfree(smmu_domain);
1140 }
1141
1142 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1143 {
1144         struct arm_smmu_smr *smr = smmu->smrs + idx;
1145         u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
1146
1147         if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
1148                 reg |= SMR_VALID;
1149         writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1150 }
1151
1152 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1153 {
1154         struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1155         u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1156                   (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1157                   (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1158
1159         if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
1160             smmu->smrs[idx].valid)
1161                 reg |= S2CR_EXIDVALID;
1162         writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1163 }
1164
1165 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1166 {
1167         arm_smmu_write_s2cr(smmu, idx);
1168         if (smmu->smrs)
1169                 arm_smmu_write_smr(smmu, idx);
1170 }
1171
1172 /*
1173  * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1174  * should be called after sCR0 is written.
1175  */
1176 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1177 {
1178         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1179         u32 smr;
1180
1181         if (!smmu->smrs)
1182                 return;
1183
1184         /*
1185          * SMR.ID bits may not be preserved if the corresponding MASK
1186          * bits are set, so check each one separately. We can reject
1187          * masters later if they try to claim IDs outside these masks.
1188          */
1189         smr = smmu->streamid_mask << SMR_ID_SHIFT;
1190         writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1191         smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1192         smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1193
1194         smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1195         writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1196         smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1197         smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1198 }
1199
1200 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
1201 {
1202         struct arm_smmu_smr *smrs = smmu->smrs;
1203         int i, free_idx = -ENOSPC;
1204
1205         /* Stream indexing is blissfully easy */
1206         if (!smrs)
1207                 return id;
1208
1209         /* Validating SMRs is... less so */
1210         for (i = 0; i < smmu->num_mapping_groups; ++i) {
1211                 if (!smrs[i].valid) {
1212                         /*
1213                          * Note the first free entry we come across, which
1214                          * we'll claim in the end if nothing else matches.
1215                          */
1216                         if (free_idx < 0)
1217                                 free_idx = i;
1218                         continue;
1219                 }
1220                 /*
1221                  * If the new entry is _entirely_ matched by an existing entry,
1222                  * then reuse that, with the guarantee that there also cannot
1223                  * be any subsequent conflicting entries. In normal use we'd
1224                  * expect simply identical entries for this case, but there's
1225                  * no harm in accommodating the generalisation.
1226                  */
1227                 if ((mask & smrs[i].mask) == mask &&
1228                     !((id ^ smrs[i].id) & ~smrs[i].mask))
1229                         return i;
1230                 /*
1231                  * If the new entry has any other overlap with an existing one,
1232                  * though, then there always exists at least one stream ID
1233                  * which would cause a conflict, and we can't allow that risk.
1234                  */
1235                 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1236                         return -EINVAL;
1237         }
1238
1239         return free_idx;
1240 }
1241
1242 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1243 {
1244         if (--smmu->s2crs[idx].count)
1245                 return false;
1246
1247         smmu->s2crs[idx] = s2cr_init_val;
1248         if (smmu->smrs)
1249                 smmu->smrs[idx].valid = false;
1250
1251         return true;
1252 }
1253
1254 static int arm_smmu_master_alloc_smes(struct device *dev)
1255 {
1256         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1257         struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1258         struct arm_smmu_device *smmu = cfg->smmu;
1259         struct arm_smmu_smr *smrs = smmu->smrs;
1260         struct iommu_group *group;
1261         int i, idx, ret;
1262
1263         mutex_lock(&smmu->stream_map_mutex);
1264         /* Figure out a viable stream map entry allocation */
1265         for_each_cfg_sme(fwspec, i, idx) {
1266                 u16 sid = fwspec->ids[i];
1267                 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1268
1269                 if (idx != INVALID_SMENDX) {
1270                         ret = -EEXIST;
1271                         goto out_err;
1272                 }
1273
1274                 ret = arm_smmu_find_sme(smmu, sid, mask);
1275                 if (ret < 0)
1276                         goto out_err;
1277
1278                 idx = ret;
1279                 if (smrs && smmu->s2crs[idx].count == 0) {
1280                         smrs[idx].id = sid;
1281                         smrs[idx].mask = mask;
1282                         smrs[idx].valid = true;
1283                 }
1284                 smmu->s2crs[idx].count++;
1285                 cfg->smendx[i] = (s16)idx;
1286         }
1287
1288         group = iommu_group_get_for_dev(dev);
1289         if (!group)
1290                 group = ERR_PTR(-ENOMEM);
1291         if (IS_ERR(group)) {
1292                 ret = PTR_ERR(group);
1293                 goto out_err;
1294         }
1295         iommu_group_put(group);
1296
1297         /* It worked! Now, poke the actual hardware */
1298         for_each_cfg_sme(fwspec, i, idx) {
1299                 arm_smmu_write_sme(smmu, idx);
1300                 smmu->s2crs[idx].group = group;
1301         }
1302
1303         mutex_unlock(&smmu->stream_map_mutex);
1304         return 0;
1305
1306 out_err:
1307         while (i--) {
1308                 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1309                 cfg->smendx[i] = INVALID_SMENDX;
1310         }
1311         mutex_unlock(&smmu->stream_map_mutex);
1312         return ret;
1313 }
1314
1315 static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
1316 {
1317         struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1318         struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1319         int i, idx;
1320
1321         mutex_lock(&smmu->stream_map_mutex);
1322         for_each_cfg_sme(fwspec, i, idx) {
1323                 if (arm_smmu_free_sme(smmu, idx))
1324                         arm_smmu_write_sme(smmu, idx);
1325                 cfg->smendx[i] = INVALID_SMENDX;
1326         }
1327         mutex_unlock(&smmu->stream_map_mutex);
1328 }
1329
1330 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1331                                       struct iommu_fwspec *fwspec)
1332 {
1333         struct arm_smmu_device *smmu = smmu_domain->smmu;
1334         struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1335         u8 cbndx = smmu_domain->cfg.cbndx;
1336         enum arm_smmu_s2cr_type type;
1337         int i, idx;
1338
1339         if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1340                 type = S2CR_TYPE_BYPASS;
1341         else
1342                 type = S2CR_TYPE_TRANS;
1343
1344         for_each_cfg_sme(fwspec, i, idx) {
1345                 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1346                         continue;
1347
1348                 s2cr[idx].type = type;
1349                 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1350                 s2cr[idx].cbndx = cbndx;
1351                 arm_smmu_write_s2cr(smmu, idx);
1352         }
1353         return 0;
1354 }
1355
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret;
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct arm_smmu_device *smmu;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (!fwspec || fwspec->ops != &arm_smmu_ops) {
                dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
                return -ENXIO;
        }

        /*
         * FIXME: The arch/arm DMA API code tries to attach devices to its own
         * domains between of_xlate() and add_device() - we have no way to cope
         * with that, so until ARM gets converted to rely on groups and default
         * domains, just say no (but more politely than by dereferencing NULL).
         * This should be at least a WARN_ON once that's sorted.
         */
        if (!fwspec->iommu_priv)
                return -ENODEV;

        smmu = fwspec_smmu(fwspec);
        /* Ensure that the domain is finalised */
        ret = arm_smmu_init_domain_context(domain, smmu);
        if (ret < 0)
                return ret;

        /*
         * Sanity check the domain. We don't support domains across
         * different SMMUs.
         */
        if (smmu_domain->smmu != smmu) {
                dev_err(dev,
                        "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
                        dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
                return -EINVAL;
        }

        /* Looks ok, so add the device to the domain */
        return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

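/*
 * map/unmap are thin wrappers: all page table manipulation is delegated
 * to the io-pgtable library via the ops installed when the domain
 * context was initialised.
 */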
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

        if (!ops)
                return -ENODEV;

        return ops->map(ops, iova, paddr, size, prot);
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                             size_t size)
{
        struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

        if (!ops)
                return 0;

        return ops->unmap(ops, iova, size);
}

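/*
 * Resolve an IOVA by asking the hardware to perform the translation:
 * write the (4K-aligned) address to ATS1PR and poll ATSR until the
 * result lands in PAR. Falls back to a software table walk if the
 * hardware does not respond in time.
 */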
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
                                              dma_addr_t iova)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
        struct device *dev = smmu->dev;
        void __iomem *cb_base;
        u32 tmp;
        u64 phys;
        unsigned long va, flags;

        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

        spin_lock_irqsave(&smmu_domain->cb_lock, flags);
        /* ATS1 registers can only be written atomically */
        va = iova & ~0xfffUL;
        if (smmu->version == ARM_SMMU_V2)
                smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
        else /* Register is only 32-bit in v1 */
                writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

        if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
                                      !(tmp & ATSR_ACTIVE), 5, 50)) {
                spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
                dev_err(dev,
                        "iova to phys timed out on %pad. Falling back to software table walk.\n",
                        &iova);
                return ops->iova_to_phys(ops, iova);
        }

        phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
        if (phys & CB_PAR_F) {
                dev_err(dev, "translation fault!\n");
                dev_err(dev, "PAR = 0x%llx\n", phys);
                return 0;
        }

        return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
                                        dma_addr_t iova)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

        if (domain->type == IOMMU_DOMAIN_IDENTITY)
                return iova;

        if (!ops)
                return 0;

        if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
                        smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
                return arm_smmu_iova_to_phys_hard(domain, iova);

        return ops->iova_to_phys(ops, iova);
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                /*
                 * Return true here as the SMMU can always send out coherent
                 * requests.
                 */
                return true;
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
                return false;
        }
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
        return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
        struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
                                                fwnode, arm_smmu_match_node);
        put_device(dev);
        return dev ? dev_get_drvdata(dev) : NULL;
}

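/*
 * Probe-time setup for a master device: locate its SMMU, validate its
 * stream IDs and SMR masks against the hardware limits, then allocate
 * the per-master configuration and stream map entries.
 */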
static int arm_smmu_add_device(struct device *dev)
{
        struct arm_smmu_device *smmu;
        struct arm_smmu_master_cfg *cfg;
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        int i, ret;

        if (using_legacy_binding) {
                ret = arm_smmu_register_legacy_master(dev, &smmu);
                if (ret)
                        goto out_free;
        } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
                smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
        } else {
                return -ENODEV;
        }

        ret = -EINVAL;
        for (i = 0; i < fwspec->num_ids; i++) {
                u16 sid = fwspec->ids[i];
                u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

                if (sid & ~smmu->streamid_mask) {
                        dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
                                sid, smmu->streamid_mask);
                        goto out_free;
                }
                if (mask & ~smmu->smr_mask_mask) {
                        dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
                                mask, smmu->smr_mask_mask);
                        goto out_free;
                }
        }

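        /*
         * On exit from the loop above, i == fwspec->num_ids, so the
         * offsetof() below sizes the allocation for exactly num_ids
         * smendx[] entries after the fixed part of the structure.
         */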
        ret = -ENOMEM;
        cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
                      GFP_KERNEL);
        if (!cfg)
                goto out_free;

        cfg->smmu = smmu;
        fwspec->iommu_priv = cfg;
        while (i--)
                cfg->smendx[i] = INVALID_SMENDX;

        ret = arm_smmu_master_alloc_smes(dev);
        if (ret)
                goto out_cfg_free;

        iommu_device_link(&smmu->iommu, dev);

        return 0;

out_cfg_free:
        kfree(cfg);
out_free:
        iommu_fwspec_free(dev);
        return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct arm_smmu_master_cfg *cfg;
        struct arm_smmu_device *smmu;

        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return;

        cfg  = fwspec->iommu_priv;
        smmu = cfg->smmu;

        iommu_device_unlink(&smmu->iommu, dev);
        arm_smmu_master_free_smes(fwspec);
        iommu_group_remove_device(dev);
        kfree(fwspec->iommu_priv);
        iommu_fwspec_free(dev);
}

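/*
 * Devices whose stream map entries are shared (e.g. matched by the same
 * SMR) must end up in the same IOMMU group: reuse any group already
 * recorded against the device's S2CRs before falling back to the
 * generic PCI/platform group allocation.
 */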
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
        struct iommu_group *group = NULL;
        int i, idx;

        for_each_cfg_sme(fwspec, i, idx) {
                if (group && smmu->s2crs[idx].group &&
                    group != smmu->s2crs[idx].group)
                        return ERR_PTR(-EINVAL);

                group = smmu->s2crs[idx].group;
        }

        if (group)
                return iommu_group_ref_get(group);

        if (dev_is_pci(dev))
                group = pci_device_group(dev);
        else
                group = generic_device_group(dev);

        return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr, void *data)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        switch (attr) {
        case DOMAIN_ATTR_NESTING:
                *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
                return 0;
        default:
                return -ENODEV;
        }
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr, void *data)
{
        int ret = 0;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        mutex_lock(&smmu_domain->init_mutex);

        switch (attr) {
        case DOMAIN_ATTR_NESTING:
                if (smmu_domain->smmu) {
                        ret = -EPERM;
                        goto out_unlock;
                }

                if (*(int *)data)
                        smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
                else
                        smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

                break;
        default:
                ret = -ENODEV;
        }

out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
}

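/*
 * Translate a DT "iommus" specifier into a firmware ID: the stream ID
 * occupies the low 16 bits and the (optional) SMR mask the upper 16.
 * For example (illustrative only), a master described with
 *     iommus = <&smmu 0x400 0x7f00>;
 * yields fwid 0x7f000400.
 */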
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        u32 mask, fwid = 0;

        if (args->args_count > 0)
                fwid |= (u16)args->args[0];

        if (args->args_count > 1)
                fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
        else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
                fwid |= (u16)mask << SMR_MASK_SHIFT;

        return iommu_fwspec_add_ids(dev, &fwid, 1);
}

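/*
 * Reserve a software-managed MSI region in each device's IOVA space so
 * that the DMA layer has somewhere to map MSI doorbells, along with any
 * other reserved regions reported for the device.
 */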
static void arm_smmu_get_resv_regions(struct device *dev,
                                      struct list_head *head)
{
        struct iommu_resv_region *region;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
                                         prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;

        list_add_tail(&region->list, head);

        iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
                                      struct list_head *head)
{
        struct iommu_resv_region *entry, *next;

        list_for_each_entry_safe(entry, next, head, list)
                kfree(entry);
}

static struct iommu_ops arm_smmu_ops = {
        .capable                = arm_smmu_capable,
        .domain_alloc           = arm_smmu_domain_alloc,
        .domain_free            = arm_smmu_domain_free,
        .attach_dev             = arm_smmu_attach_dev,
        .map                    = arm_smmu_map,
        .unmap                  = arm_smmu_unmap,
        .map_sg                 = default_iommu_map_sg,
        .iova_to_phys           = arm_smmu_iova_to_phys,
        .add_device             = arm_smmu_add_device,
        .remove_device          = arm_smmu_remove_device,
        .device_group           = arm_smmu_device_group,
        .domain_get_attr        = arm_smmu_domain_get_attr,
        .domain_set_attr        = arm_smmu_domain_set_attr,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
        .put_resv_regions       = arm_smmu_put_resv_regions,
        .pgsize_bitmap          = -1UL, /* Restricted during device attach */
};

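/*
 * Bring the SMMU to a known state: clear recorded faults, invalidate
 * all stream mappings and TLBs, disable every context bank, apply
 * model-specific workarounds, and finally configure and enable global
 * operation via sCR0.
 */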
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        void __iomem *cb_base;
        int i;
        u32 reg, major;

        /* clear global FSR */
        reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
        writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

        /*
         * Reset stream mapping groups: Initial values mark all SMRn as
         * invalid and all S2CRn as bypass unless overridden.
         */
        for (i = 0; i < smmu->num_mapping_groups; ++i)
                arm_smmu_write_sme(smmu, i);

        if (smmu->model == ARM_MMU500) {
                /*
                 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
                 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
                 * bit is only present in MMU-500r2 onwards.
                 */
                reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
                major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
                reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
                if (major >= 2)
                        reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
                /*
                 * Allow unmatched Stream IDs to allocate bypass
                 * TLB entries for reduced latency.
                 */
                reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
                writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
        }

        /* Make sure all context banks are disabled and clear CB_FSR */
        for (i = 0; i < smmu->num_context_banks; ++i) {
                cb_base = ARM_SMMU_CB(smmu, i);
                writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
                writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
                /*
                 * Disable MMU-500's not-particularly-beneficial next-page
                 * prefetcher for the sake of errata #841119 and #826419.
                 */
                if (smmu->model == ARM_MMU500) {
                        reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
                        reg &= ~ARM_MMU500_ACTLR_CPRE;
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
                }
        }

        /* Invalidate the TLB, just in case */
        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

        reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

        /* Enable fault reporting */
        reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

        /* Disable TLB broadcasting. */
        reg |= (sCR0_VMIDPNE | sCR0_PTM);

        /* Enable client access, handling unmatched streams as appropriate */
        reg &= ~sCR0_CLIENTPD;
        if (disable_bypass)
                reg |= sCR0_USFCFG;
        else
                reg &= ~sCR0_USFCFG;

        /* Disable forced broadcasting */
        reg &= ~sCR0_FB;

        /* Don't upgrade barriers */
        reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

        if (smmu->features & ARM_SMMU_FEAT_VMID16)
                reg |= sCR0_VMID16EN;

        if (smmu->features & ARM_SMMU_FEAT_EXIDS)
                reg |= sCR0_EXIDENABLE;

        /* Push the button */
        arm_smmu_tlb_sync_global(smmu);
        writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

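/*
 * Decode the address size fields found in the ID registers (IAS/OAS/
 * UBS) into an address width in bits; encodings above 5 are treated as
 * 48-bit.
 */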
static int arm_smmu_id_size_to_bits(int size)
{
        switch (size) {
        case 0:
                return 32;
        case 1:
                return 36;
        case 2:
                return 40;
        case 3:
                return 42;
        case 4:
                return 44;
        case 5:
        default:
                return 48;
        }
}

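/*
 * Read the ID registers and derive the hardware configuration: which
 * translation stages and page table formats are supported, how many
 * stream mapping groups and context banks exist, and the input/output
 * address sizes.
 */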
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
        unsigned long size;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 id;
        bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
        int i;

        dev_notice(smmu->dev, "probing hardware configuration...\n");
        dev_notice(smmu->dev, "SMMUv%d with:\n",
                        smmu->version == ARM_SMMU_V2 ? 2 : 1);

        /* ID0 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

        /* Restrict available stages based on module parameter */
        if (force_stage == 1)
                id &= ~(ID0_S2TS | ID0_NTS);
        else if (force_stage == 2)
                id &= ~(ID0_S1TS | ID0_NTS);

        if (id & ID0_S1TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
                dev_notice(smmu->dev, "\tstage 1 translation\n");
        }

        if (id & ID0_S2TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
                dev_notice(smmu->dev, "\tstage 2 translation\n");
        }

        if (id & ID0_NTS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
                dev_notice(smmu->dev, "\tnested translation\n");
        }

        if (!(smmu->features &
                (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
                dev_err(smmu->dev, "\tno translation support!\n");
                return -ENODEV;
        }

        if ((id & ID0_S1TS) &&
                ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }

        /*
         * In order for DMA API calls to work properly, we must defer to what
         * the FW says about coherency, regardless of what the hardware claims.
         * Fortunately, this also opens up a workaround for systems where the
         * ID register value has ended up configured incorrectly.
         */
        cttw_reg = !!(id & ID0_CTTW);
        if (cttw_fw || cttw_reg)
                dev_notice(smmu->dev, "\t%scoherent table walk\n",
                           cttw_fw ? "" : "non-");
        if (cttw_fw != cttw_reg)
                dev_notice(smmu->dev,
                           "\t(IDR0.CTTW overridden by FW configuration)\n");

        /* Max. number of entries we have for stream matching/indexing */
        if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
                smmu->features |= ARM_SMMU_FEAT_EXIDS;
                size = 1 << 16;
        } else {
                size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
        }
        smmu->streamid_mask = size - 1;
        if (id & ID0_SMS) {
                smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
                size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
                if (size == 0) {
                        dev_err(smmu->dev,
                                "stream-matching supported, but no SMRs present!\n");
                        return -ENODEV;
                }

                /* Zero-initialised to mark as invalid */
                smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
                                          GFP_KERNEL);
                if (!smmu->smrs)
                        return -ENOMEM;

                dev_notice(smmu->dev,
                           "\tstream matching with %lu register groups\n",
                           size);
        }
        /* s2cr->type == 0 means translation, so initialise explicitly */
        smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
                                         GFP_KERNEL);
        if (!smmu->s2crs)
                return -ENOMEM;
        for (i = 0; i < size; i++)
                smmu->s2crs[i] = s2cr_init_val;

        smmu->num_mapping_groups = size;
        mutex_init(&smmu->stream_map_mutex);
        spin_lock_init(&smmu->global_sync_lock);

        if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
                smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
                if (!(id & ID0_PTFS_NO_AARCH32S))
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
        }

        /* ID1 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
        smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

        /* Check for size mismatch of SMMU address space from mapped region */
        size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
        size <<= smmu->pgshift;
        if (smmu->cb_base != gr0_base + size)
                dev_warn(smmu->dev,
                        "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
                        size * 2, (smmu->cb_base - gr0_base) * 2);

        smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
        smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
        if (smmu->num_s2_context_banks > smmu->num_context_banks) {
                dev_err(smmu->dev, "impossible number of S2 context banks!\n");
                return -ENODEV;
        }
        dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
                   smmu->num_context_banks, smmu->num_s2_context_banks);
        /*
         * Cavium CN88xx erratum #27704.
         * Ensure ASID and VMID allocation is unique across all SMMUs in
         * the system.
         */
        if (smmu->model == CAVIUM_SMMUV2) {
                smmu->cavium_id_base =
                        atomic_add_return(smmu->num_context_banks,
                                          &cavium_smmu_context_count);
                smmu->cavium_id_base -= smmu->num_context_banks;
                dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
        }

        /* ID2 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
        size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
        smmu->ipa_size = size;

        /* The output mask is also applied for bypass */
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
        smmu->pa_size = size;

        if (id & ID2_VMID16)
                smmu->features |= ARM_SMMU_FEAT_VMID16;

        /*
         * What the page table walker can address actually depends on which
         * descriptor format is in use, but since a) we don't know that yet,
         * and b) it can vary per context bank, this will have to do...
         */
        if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
                dev_warn(smmu->dev,
                         "failed to set DMA mask for table walker\n");

        if (smmu->version < ARM_SMMU_V2) {
                smmu->va_size = smmu->ipa_size;
                if (smmu->version == ARM_SMMU_V1_64K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        } else {
                size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
                smmu->va_size = arm_smmu_id_size_to_bits(size);
                if (id & ID2_PTFS_4K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
                if (id & ID2_PTFS_16K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
                if (id & ID2_PTFS_64K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        }

        /* Now we've corralled the various formats, what'll it do? */
        if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
                smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
        if (smmu->features &
            (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
                smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
        if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
                smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
        if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
                smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

        if (arm_smmu_ops.pgsize_bitmap == -1UL)
                arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
        else
                arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
        dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
                   smmu->pgsize_bitmap);

        if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
                dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
                           smmu->va_size, smmu->ipa_size);

        if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
                dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
                           smmu->ipa_size, smmu->pa_size);

        return 0;
}

struct arm_smmu_match_data {
        enum arm_smmu_arch_version version;
        enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)     \
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
        { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
        { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
        { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
        { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
        { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
        { },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
        int ret = 0;

        switch (model) {
        case ACPI_IORT_SMMU_V1:
        case ACPI_IORT_SMMU_CORELINK_MMU400:
                smmu->version = ARM_SMMU_V1;
                smmu->model = GENERIC_SMMU;
                break;
        case ACPI_IORT_SMMU_CORELINK_MMU401:
                smmu->version = ARM_SMMU_V1_64K;
                smmu->model = GENERIC_SMMU;
                break;
        case ACPI_IORT_SMMU_V2:
                smmu->version = ARM_SMMU_V2;
                smmu->model = GENERIC_SMMU;
                break;
        case ACPI_IORT_SMMU_CORELINK_MMU500:
                smmu->version = ARM_SMMU_V2;
                smmu->model = ARM_MMU500;
                break;
        case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
                smmu->version = ARM_SMMU_V2;
                smmu->model = CAVIUM_SMMUV2;
                break;
        default:
                ret = -ENODEV;
        }

        return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
                                      struct arm_smmu_device *smmu)
{
        struct device *dev = smmu->dev;
        struct acpi_iort_node *node =
                *(struct acpi_iort_node **)dev_get_platdata(dev);
        struct acpi_iort_smmu *iort_smmu;
        int ret;

        /* Retrieve SMMU1/2 specific data */
        iort_smmu = (struct acpi_iort_smmu *)node->node_data;

        ret = acpi_smmu_get_data(iort_smmu->model, smmu);
        if (ret < 0)
                return ret;

        /* Ignore the configuration access interrupt */
        smmu->num_global_irqs = 1;

        if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
                smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

        return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
                                             struct arm_smmu_device *smmu)
{
        return -ENODEV;
}
#endif

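/*
 * Probe configuration from device tree: the number of global
 * interrupts, the SMMU version/model from the compatible string, and
 * whether the deprecated "mmu-masters" binding is in use, which is
 * mutually exclusive with the generic binding system-wide.
 */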
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
                                    struct arm_smmu_device *smmu)
{
        const struct arm_smmu_match_data *data;
        struct device *dev = &pdev->dev;
        bool legacy_binding;

        if (of_property_read_u32(dev->of_node, "#global-interrupts",
                                 &smmu->num_global_irqs)) {
                dev_err(dev, "missing #global-interrupts property\n");
                return -ENODEV;
        }

        data = of_device_get_match_data(dev);
        smmu->version = data->version;
        smmu->model = data->model;

        parse_driver_options(smmu);

        legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
        if (legacy_binding && !using_generic_binding) {
                if (!using_legacy_binding)
                        pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
                using_legacy_binding = true;
        } else if (!legacy_binding && !using_legacy_binding) {
                using_generic_binding = true;
        } else {
                dev_err(dev, "not probing due to mismatched DT properties\n");
                return -ENODEV;
        }

        if (of_dma_is_coherent(dev->of_node))
                smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

        return 0;
}

static void arm_smmu_bus_init(void)
{
        /* Oh, for a proper bus abstraction */
        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
        if (!iommu_present(&amba_bustype))
                bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
        if (!iommu_present(&pci_bus_type)) {
                pci_request_acs();
                bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
        }
#endif
}

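/*
 * Main platform driver probe: map the registers, gather the IRQs, probe
 * the hardware configuration, register with the IOMMU core and reset
 * the device ready for use.
 */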
static int arm_smmu_device_probe(struct platform_device *pdev)
{
        struct resource *res;
        resource_size_t ioaddr;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        int num_irqs, i, err;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu) {
                dev_err(dev, "failed to allocate arm_smmu_device\n");
                return -ENOMEM;
        }
        smmu->dev = dev;

        if (dev->of_node)
                err = arm_smmu_device_dt_probe(pdev, smmu);
        else
                err = arm_smmu_device_acpi_probe(pdev, smmu);

        if (err)
                return err;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);
        /* Only dereference res once devm_ioremap_resource() has validated it */
        ioaddr = res->start;
        smmu->cb_base = smmu->base + resource_size(res) / 2;

        num_irqs = 0;
        while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
                num_irqs++;
                if (num_irqs > smmu->num_global_irqs)
                        smmu->num_context_irqs++;
        }

        if (!smmu->num_context_irqs) {
                dev_err(dev, "found %d interrupts but expected at least %d\n",
                        num_irqs, smmu->num_global_irqs + 1);
                return -ENODEV;
        }

        smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
                                  GFP_KERNEL);
        if (!smmu->irqs) {
                dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
                return -ENOMEM;
        }

        for (i = 0; i < num_irqs; ++i) {
                int irq = platform_get_irq(pdev, i);

                if (irq < 0) {
                        dev_err(dev, "failed to get irq index %d\n", i);
                        return -ENODEV;
                }
                smmu->irqs[i] = irq;
        }

        err = arm_smmu_device_cfg_probe(smmu);
        if (err)
                return err;

        if (smmu->version == ARM_SMMU_V2 &&
            smmu->num_context_banks != smmu->num_context_irqs) {
                dev_err(dev,
                        "found only %d context interrupt(s) but %d required\n",
                        smmu->num_context_irqs, smmu->num_context_banks);
                return -ENODEV;
        }

        for (i = 0; i < smmu->num_global_irqs; ++i) {
                err = devm_request_irq(smmu->dev, smmu->irqs[i],
                                       arm_smmu_global_fault,
                                       IRQF_SHARED,
                                       "arm-smmu global fault",
                                       smmu);
                if (err) {
                        dev_err(dev, "failed to request global IRQ %d (%u)\n",
                                i, smmu->irqs[i]);
                        return err;
                }
        }

        err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
                                     "smmu.%pa", &ioaddr);
        if (err) {
                dev_err(dev, "Failed to register iommu in sysfs\n");
                return err;
        }

        iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
        iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

        err = iommu_device_register(&smmu->iommu);
        if (err) {
                dev_err(dev, "Failed to register iommu\n");
                return err;
        }

        platform_set_drvdata(pdev, smmu);
        arm_smmu_device_reset(smmu);
        arm_smmu_test_smr_masks(smmu);

        /*
         * For ACPI and generic DT bindings, an SMMU will be probed before
         * any device which might need it, so we want the bus ops in place
         * ready to handle default domain setup as soon as any SMMU exists.
         */
        if (!using_legacy_binding)
                arm_smmu_bus_init();

        return 0;
}

/*
 * With the legacy DT binding in play, though, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
        if (using_legacy_binding)
                arm_smmu_bus_init();
        return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);

static int arm_smmu_device_remove(struct platform_device *pdev)
{
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

        if (!smmu)
                return -ENODEV;

        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_err(&pdev->dev, "removing device with active domains!\n");

        /* Turn the thing off */
        writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
        return 0;
}

static struct platform_driver arm_smmu_driver = {
        .driver = {
                .name           = "arm-smmu",
                .of_match_table = of_match_ptr(arm_smmu_of_match),
        },
        .probe  = arm_smmu_device_probe,
        .remove = arm_smmu_device_remove,
};
module_platform_driver(arm_smmu_driver);

IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");