#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
static int forbid_dac __read_mostly;

const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536
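/*
 * Walk the IOMMU detection table built from the .iommu_table section:
 * mark every entry whose detect() hook reports a hit, run its early
 * init hook, and stop if the entry requests exclusive detection.
 */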
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);
	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
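/*
 * Generic coherent allocation: prefer CMA when the context may sleep,
 * fall back to the page allocator, and retry with GFP_DMA if the pages
 * land above the device's coherent DMA mask.
 */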
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);
	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));
		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}
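/*
 * Free a buffer obtained from dma_generic_alloc_coherent(): let CMA try
 * to reclaim the pages first, otherwise hand them back to the page
 * allocator.
 */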
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}
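/*
 * Fix up the device and gfp flags before a coherent allocation: NULL
 * devices get the ISA fallback device, and the zone flags are recomputed
 * from the device's coherent DMA mask.
 */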
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	if (!*dev)
		*dev = &x86_dma_fallback_dev;
	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
	if (!is_device_dma_capable(*dev))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);
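/*
 * Example settings: "iommu=off", "iommu=pt", "iommu=soft", or a
 * comma-separated combination such as "iommu=nomerge,nodac"; the string
 * is parsed option by option below.
 */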
/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}
		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
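/* Return nonzero if the device can use @mask for DMA on this platform. */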
int x86_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif
	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;
	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
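/*
 * Late IOMMU initialization: set up DMA-API debugging, call the selected
 * implementation's init hook, and run late_init for every table entry
 * that was detected earlier.
 */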
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();
	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}
	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif