/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <dt-bindings/memory/mt8173-larb-port.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

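/*
 * M4U register map: the REG_MMU_* macros below are MMIO offsets from the
 * IOMMU base (data->base), and the F_* macros encode bit fields within
 * those registers.
 */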
#define REG_MMU_PT_BASE_ADDR                    0x000

#define REG_MMU_INVALIDATE                      0x020
#define F_ALL_INVLD                             0x2
#define F_MMU_INV_RANGE                         0x1

#define REG_MMU_INVLD_START_A                   0x024
#define REG_MMU_INVLD_END_A                     0x028

#define REG_MMU_INV_SEL                         0x038
#define F_INVLD_EN0                             BIT(0)
#define F_INVLD_EN1                             BIT(1)

#define REG_MMU_STANDARD_AXI_MODE               0x048
#define REG_MMU_DCM_DIS                         0x050

#define REG_MMU_CTRL_REG                        0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD           BIT(4)
#define F_MMU_TF_PROTECT_SEL(prot)              (((prot) & 0x3) << 5)

#define REG_MMU_IVRP_PADDR                      0x114
#define F_MMU_IVRP_PA_SET(pa, ext)              (((pa) >> 1) | ((!!(ext)) << 31))

#define REG_MMU_INT_CONTROL0                    0x120
#define F_L2_MULIT_HIT_EN                       BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN               BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN          BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN             BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN              BIT(5)
#define F_MISS_FIFO_ERR_INT_EN                  BIT(6)
#define F_INT_CLR_BIT                           BIT(12)

#define REG_MMU_INT_MAIN_CONTROL                0x124
#define F_INT_TRANSLATION_FAULT                 BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT              BIT(1)
#define F_INT_INVALID_PA_FAULT                  BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT           BIT(3)
#define F_INT_TLB_MISS_FAULT                    BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT       BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT    BIT(6)

#define REG_MMU_CPE_DONE                        0x12C

#define REG_MMU_FAULT_ST1                       0x134

#define REG_MMU_FAULT_VA                        0x13c
#define F_MMU_FAULT_VA_MSK                      0xfffff000
#define F_MMU_FAULT_VA_WRITE_BIT                BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT                BIT(0)

#define REG_MMU_INVLD_PA                        0x140
#define REG_MMU_INT_ID                          0x150
#define F_MMU0_INT_ID_LARB_ID(a)                (((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)                (((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN                    128

struct mtk_iommu_domain {
        spinlock_t                      pgtlock; /* lock for page table */

        struct io_pgtable_cfg           cfg;
        struct io_pgtable_ops           *iop;

        struct iommu_domain             domain;
};

static struct iommu_ops mtk_iommu_ops;

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

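/*
 * TLB maintenance: the M4U can invalidate either the whole TLB or a
 * single IOVA range. Range invalidation is posted, so callers must pair
 * mtk_iommu_tlb_add_flush_nosync() with mtk_iommu_tlb_sync(), which polls
 * REG_MMU_CPE_DONE for completion.
 */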
static void mtk_iommu_tlb_flush_all(void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
        writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
        wmb(); /* Make sure the TLB flush has completed */
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
                                           size_t granule, bool leaf,
                                           void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);

        writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
        writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
        writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
        data->tlb_flush_active = true;
}

static void mtk_iommu_tlb_sync(void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        int ret;
        u32 tmp;

        /* Avoid timing out if there's nothing to wait for */
        if (!data->tlb_flush_active)
                return;

        ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
                                        tmp != 0, 10, 100000);
        if (ret) {
                dev_warn(data->dev,
                         "Partial TLB flush timed out, falling back to full flush\n");
                mtk_iommu_tlb_flush_all(cookie);
        }
        /* Clear the CPE status */
        writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
        data->tlb_flush_active = false;
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
        .tlb_sync = mtk_iommu_tlb_sync,
};

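/*
 * Fault interrupt handler: decode the faulting IOVA, physical address,
 * larb/port and access type from the fault registers, report the fault,
 * then clear the interrupt and flush the whole TLB.
 */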
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port;
        bool layer, write;

        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
        fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
        fault_iova &= F_MMU_FAULT_VA_MSK;
        fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
        regval = readl_relaxed(data->base + REG_MMU_INT_ID);
        fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
        fault_port = F_MMU0_INT_ID_PORT_ID(regval);

        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
                        data->dev,
                        "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
                        int_state, fault_iova, fault_pa, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }

        /* Interrupt clear */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}

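/*
 * Route a client's SMI larb ports to (or away from) the M4U. Each fwspec
 * ID carries a larb/port pair for one master; this only updates the
 * shared larb_imu bitmaps, which the SMI larb driver applies to hardware.
 */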
static void mtk_iommu_config(struct mtk_iommu_data *data,
                             struct device *dev, bool enable)
{
        struct mtk_smi_larb_iommu    *larb_mmu;
        unsigned int                 larbid, portid;
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
                larb_mmu = &data->smi_imu.larb_imu[larbid];

                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}

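/*
 * All client devices share a single pagetable, allocated on the first
 * attach: an ARM short-descriptor (v7s) table covering a 32-bit IOVA
 * space, whose base is then programmed into REG_MMU_PT_BASE_ADDR.
 */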
static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
        struct mtk_iommu_domain *dom = data->m4u_dom;

        spin_lock_init(&dom->pgtlock);

        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
                        IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &mtk_iommu_gather_ops,
                .iommu_dev = data->dev,
        };

        if (data->enable_4GB)
                dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
                return -EINVAL;
        }

        /* Update our supported page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

        writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
               data->base + REG_MMU_PT_BASE_ADDR);
        return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_DMA)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        if (iommu_get_dma_cookie(&dom->domain)) {
                kfree(dom);
                return NULL;
        }

        dom->domain.geometry.aperture_start = 0;
        dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        dom->domain.geometry.force_aperture = true;

        return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        iommu_put_dma_cookie(domain);
        kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
        int ret;

        if (!data)
                return -ENODEV;

        if (!data->m4u_dom) {
                data->m4u_dom = dom;
                ret = mtk_iommu_domain_finalise(data);
                if (ret) {
                        data->m4u_dom = NULL;
                        return ret;
                }
        } else if (data->m4u_dom != dom) {
                /* All the client devices should be in the same m4u domain */
                dev_err(dev, "tried to attach to a different iommu domain\n");
                return -EPERM;
        }

        mtk_iommu_config(data, dev, true);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

        if (!data)
                return;

        mtk_iommu_config(data, dev, false);
}

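/*
 * map/unmap/iova_to_phys are thin wrappers around the io-pgtable ops,
 * serialised by the per-domain pgtlock since they may run in atomic
 * context.
 */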
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dom->pgtlock, flags);
        ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        size_t unmapsz;

        spin_lock_irqsave(&dom->pgtlock, flags);
        unmapsz = dom->iop->unmap(dom->iop, iova, size);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return unmapsz;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        phys_addr_t pa;

        spin_lock_irqsave(&dom->pgtlock, flags);
        pa = dom->iop->iova_to_phys(dom->iop, iova);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
        struct mtk_iommu_data *data;
        struct iommu_group *group;

        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not an iommu client device */

        data = dev->iommu_fwspec->iommu_priv;
        iommu_device_link(&data->iommu, dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
        struct mtk_iommu_data *data;

        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return;

        data = dev->iommu_fwspec->iommu_priv;
        iommu_device_unlink(&data->iommu, dev);

        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
}

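/*
 * All clients sit behind the one M4U, so they are placed into a single
 * shared iommu_group, mirroring the single shared m4u domain above.
 */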
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

        if (!data)
                return ERR_PTR(-ENODEV);

        /* All the client devices are in the same m4u iommu-group */
        if (!data->m4u_group) {
                data->m4u_group = iommu_group_alloc();
                if (IS_ERR(data->m4u_group))
                        dev_err(dev, "Failed to allocate M4U IOMMU group\n");
        } else {
                iommu_group_ref_get(data->m4u_group);
        }
        return data->m4u_group;
}

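/*
 * Each client references the M4U with a single specifier cell naming its
 * master port; an illustrative devicetree snippet (the port macro comes
 * from dt-bindings/memory/mt8173-larb-port.h and is only an example):
 *
 *      iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *
 * mtk_iommu_of_xlate() resolves the phandle to the m4u device and records
 * the port ID in the client's iommu_fwspec.
 */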
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct platform_device *m4updev;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!dev->iommu_fwspec->iommu_priv) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
        }

        return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops mtk_iommu_ops = {
        .domain_alloc   = mtk_iommu_domain_alloc,
        .domain_free    = mtk_iommu_domain_free,
        .attach_dev     = mtk_iommu_attach_device,
        .detach_dev     = mtk_iommu_detach_device,
        .map            = mtk_iommu_map,
        .unmap          = mtk_iommu_unmap,
        .map_sg         = default_iommu_map_sg,
        .iova_to_phys   = mtk_iommu_iova_to_phys,
        .add_device     = mtk_iommu_add_device,
        .remove_device  = mtk_iommu_remove_device,
        .device_group   = mtk_iommu_device_group,
        .of_xlate       = mtk_iommu_of_xlate,
        .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

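/*
 * Bring the M4U hardware to a known state: enable the bus clock, set the
 * translation-fault protect mode, unmask the fault interrupts, program
 * the protect-buffer address and hook up the fault ISR.
 */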
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
                return ret;
        }

        regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
                F_MMU_TF_PROTECT_SEL(2);
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_L2_MULIT_HIT_EN |
                F_TABLE_WALK_FAULT_INT_EN |
                F_PREETCH_FIFO_OVERFLOW_INT_EN |
                F_MISS_FIFO_OVERFLOW_INT_EN |
                F_PREFETCH_FIFO_ERR_INT_EN |
                F_MISS_FIFO_ERR_INT_EN;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        regval = F_INT_TRANSLATION_FAULT |
                F_INT_MAIN_MULTI_HIT_FAULT |
                F_INT_INVALID_PA_FAULT |
                F_INT_ENTRY_REPLACEMENT_FAULT |
                F_INT_TLB_MISS_FAULT |
                F_INT_MISS_TRANSACTION_FIFO_FAULT |
                F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

        writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
                       data->base + REG_MMU_IVRP_PADDR);

        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
        writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
                dev_err(data->dev, "Failed to request IRQ %d\n", data->irq);
                return -ENODEV;
        }

        return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
        .bind           = mtk_iommu_bind,
        .unbind         = mtk_iommu_unbind,
};

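/*
 * Probe flow: allocate the protect buffer, map the M4U resources, then
 * collect the "mediatek,larbs" phandles into a component match list so
 * that the SMI larbs bind to this master before the IOMMU is exposed on
 * the platform bus.
 */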
static int mtk_iommu_probe(struct platform_device *pdev)
{
        struct mtk_iommu_data   *data;
        struct device           *dev = &pdev->dev;
        struct resource         *res;
        resource_size_t         ioaddr;
        struct component_match  *match = NULL;
        void                    *protect;
        int                     i, larb_nr, ret;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->dev = dev;

        /* Protect memory. HW will access here on a translation fault. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
        if (!protect)
                return -ENOMEM;
        data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

        /* Whether the available DRAM extends beyond 4GB */
        data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
        ioaddr = res->start;

        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
                return data->irq;

        data->bclk = devm_clk_get(dev, "bclk");
        if (IS_ERR(data->bclk))
                return PTR_ERR(data->bclk);

        larb_nr = of_count_phandle_with_args(dev->of_node,
                                             "mediatek,larbs", NULL);
        if (larb_nr < 0)
                return larb_nr;
        data->smi_imu.larb_nr = larb_nr;

        for (i = 0; i < larb_nr; i++) {
                struct device_node *larbnode;
                struct platform_device *plarbdev;

                larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
                if (!larbnode)
                        return -EINVAL;

                if (!of_device_is_available(larbnode))
                        continue;

                plarbdev = of_find_device_by_node(larbnode);
                if (!plarbdev) {
                        plarbdev = of_platform_device_create(
                                                larbnode, NULL,
                                                platform_bus_type.dev_root);
                        if (!plarbdev) {
                                of_node_put(larbnode);
                                return -EPROBE_DEFER;
                        }
                }
                data->smi_imu.larb_imu[i].dev = &plarbdev->dev;

                component_match_add_release(dev, &match, release_of,
                                            compare_of, larbnode);
        }

        platform_set_drvdata(pdev, data);

        ret = mtk_iommu_hw_init(data);
        if (ret)
                return ret;

        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
                                     "mtk-iommu.%pa", &ioaddr);
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

        return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);

        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);

        free_io_pgtable_ops(data->m4u_dom->iop);
        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
        component_master_del(&pdev->dev, &mtk_iommu_com_ops);
        return 0;
}

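/*
 * The M4U configuration registers do not survive system suspend, so they
 * are saved here and rewritten (together with the pagetable base and
 * protect-buffer address) on resume.
 */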
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        reg->standard_axi_mode = readl_relaxed(base +
                                               REG_MMU_STANDARD_AXI_MODE);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
        return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
                       base + REG_MMU_PT_BASE_ADDR);
        writel_relaxed(reg->standard_axi_mode,
                       base + REG_MMU_STANDARD_AXI_MODE);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
        writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
        writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
                       base + REG_MMU_IVRP_PADDR);
        return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt8173-m4u", },
        {}
};

static struct platform_driver mtk_iommu_driver = {
        .probe  = mtk_iommu_probe,
        .remove = mtk_iommu_remove,
        .driver = {
                .name = "mtk-iommu",
                .of_match_table = mtk_iommu_of_ids,
                .pm = &mtk_iommu_pm_ops,
        }
};

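/*
 * Early init hook: IOMMU_OF_DECLARE() arranges for this to run for each
 * "mediatek,mt8173-m4u" node before client devices probe, creating the
 * m4u platform device and registering the driver.
 */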
static int mtk_iommu_init_fn(struct device_node *np)
{
        int ret;
        struct platform_device *pdev;

        pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
        if (!pdev)
                return -ENOMEM;

        ret = platform_driver_register(&mtk_iommu_driver);
        if (ret) {
                pr_err("%s: Failed to register driver\n", __func__);
                return ret;
        }

        return 0;
}

IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);