/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
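
/*
 * Note on the layout above: the global register space occupies the first
 * pages of the SMMU, with GR0 in page 0 and GR1 in page 1; the page size
 * (and hence pgshift) is discovered from ID1 at probe time.
 */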

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
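
/*
 * The translation context banks occupy the upper half of the SMMU address
 * space, one page (of pgshift size) per context bank.
 */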

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	s16				smendx[MAX_MASTER_STREAMIDS];
};
#define INVALID_SMENDX			-1
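
/*
 * For each stream ID, smendx[i] records which stream-map register group
 * streamids[i] occupies: an SMR index on stream-matching hardware, or the
 * stream ID itself on stream-indexing hardware. INVALID_SMENDX marks an
 * entry that has not yet been allocated.
 */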

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
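
/*
 * cavium_id_base offsets each Cavium SMMU's ASIDs and VMIDs so that they
 * are unique system-wide (see the CN88xx erratum #27704 handling in
 * arm_smmu_device_cfg_probe()); it is zero on other implementations.
 */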

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};
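
/*
 * A local variant of of_phandle_args: the generic one only carries a small
 * fixed number of arguments, whereas a master may have up to
 * MAX_MASTER_STREAMIDS stream IDs.
 */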
struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
		master->cfg.smendx[i] = INVALID_SMENDX;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}
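
/*
 * The context-bank bitmap below is claimed without a lock: racing
 * allocators may spot the same zero bit, but test_and_set_bit() ensures
 * that only one of them wins and the others retry from the next free bit.
 */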
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            N
	 *     N                S2             S2
	 *     N                S1             S1
	 *     S2               N              S2 (BUG)
	 *     S2             S1+S2            S2
	 *     S2               S2             S2
	 *     S2               S1             S1 (BUG)
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
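
/*
 * SMR slots are claimed lock-free: cmpxchg() on the 'valid' flag serialises
 * allocators, and the SMR is only written to the hardware once the caller
 * has filled in its id/mask.
 */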
static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu)
{
	int i;

	for (i = 0; i < smmu->num_mapping_groups; i++)
		if (!cmpxchg(&smmu->smrs[i].valid, false, true))
			return i;

	return INVALID_SMENDX;
}

static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
{
	writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
	WRITE_ONCE(smmu->smrs[idx].valid, false);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT |
		  (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, idx;

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		if (cfg->smendx[i] != INVALID_SMENDX)
			return -EEXIST;

		/* ...except on stream indexing hardware, of course */
		if (!smrs) {
			cfg->smendx[i] = cfg->streamids[i];
			continue;
		}

		idx = arm_smmu_alloc_smr(smmu);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}
		cfg->smendx[i] = idx;

		smrs[idx].id = cfg->streamids[i];
		smrs[idx].mask = 0; /* We don't currently share SMRs */
	}

	if (!smrs)
		return 0;

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i)
		arm_smmu_write_smr(smmu, cfg->smendx[i]);

	return 0;

err_free_smrs:
	while (i--) {
		arm_smmu_free_smr(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	return -ENOSPC;
}

static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		if (smmu->smrs)
			arm_smmu_free_smr(smmu, cfg->smendx[i]);

		cfg->smendx[i] = INVALID_SMENDX;
	}
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_alloc_smes(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx, s2cr;

		idx = cfg->smendx[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = cfg->smendx[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		/* An IOMMU group is torn down by the first device to be removed */
		if (idx == INVALID_SMENDX)
			return;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smes(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids) {
		cfg->streamids[i] = sid;
		cfg->smendx[i] = INVALID_SMENDX;
		cfg->num_streamids++;
	}

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (smmu->smrs)
			arm_smmu_write_smr(smmu, i);
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
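
/*
 * Decode the architected address size encoding used by the ID2 IAS/OAS/UBS
 * fields (0=32, 1=36, 2=40, 3=42, 4=44, 5 and above=48 bits).
 */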
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x",
			   size, smmu->smr_mask_mask);
	}
	smmu->num_mapping_groups = size;

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");