struct deferred_flush_table *tables;
};
-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
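
The per-CPU deferred-flush state is referenced only from this file, so the definition gains static linkage. A minimal sketch of the file-local per-CPU pattern, with hypothetical names (counter, bump_counter), assuming <linux/percpu.h>:

	/* one instance per possible CPU, visible only in this file */
	static DEFINE_PER_CPU(unsigned long, counter);

	static void bump_counter(void)
	{
		/* this_cpu_inc() is itself safe against preemption */
		this_cpu_inc(counter);
	}
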
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
/* No lock here, assumes no domain exit in normal case */
info = dev->archdata.iommu;
- if (info)
+ if (likely(info))
return info->domain;
return NULL;
}
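
The lockless lookup keeps its fast path but now carries a likely() hint, telling the compiler to lay out the cached-info case as the straight-line branch. In the kernel these annotations are thin wrappers around __builtin_expect() (include/linux/compiler.h):

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)
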
return iova_pfn;
}
-static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
+static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
struct dmar_domain *domain, *tmp;
struct dmar_rmrr_unit *rmrr;
return domain;
}
-static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
-{
- struct device_domain_info *info;
-
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
- if (likely(info))
- return info->domain;
-
- return __get_valid_domain_for_dev(dev);
-}
-
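
With the fast path folded into the lookup above, the two-level split is redundant: the inline wrapper is deleted and the old slow path __get_valid_domain_for_dev() simply takes over the get_valid_domain_for_dev() name. The shape being collapsed, sketched with hypothetical names (struct foo, lookup, __lookup_slow):

	struct foo;
	static struct foo *__lookup_slow(struct device *dev);

	/* before: trivial inline fast path over an out-of-line slow path */
	static inline struct foo *lookup(struct device *dev)
	{
		struct foo *f = dev_get_drvdata(dev);

		return likely(f) ? f : __lookup_slow(dev);
	}
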
/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
struct intel_iommu *iommu;
struct deferred_flush_entry *entry;
struct deferred_flush_data *flush_data;
- unsigned int cpuid;
- cpuid = get_cpu();
- flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+ flush_data = raw_cpu_ptr(&deferred_flush);
/* Flush all CPUs' entries to avoid deferring too much. If
* this becomes a bottleneck, can just flush us, and rely on
}
flush_data->size++;
spin_unlock_irqrestore(&flush_data->lock, flags);
-
- put_cpu();
}
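
get_cpu() disables preemption, per_cpu_ptr() indexes by the returned CPU id, and put_cpu() re-enables preemption. None of that is needed here: every touch of the flush data happens under flush_data->lock with interrupts disabled, which serializes against all other users even if the task migrates right after raw_cpu_ptr() picks a CPU's instance; at worst an entry is queued on a neighbouring CPU's list. A before/after sketch, assuming a per-CPU structure guarded by its own spinlock (names hypothetical):

	struct pcpu_state {
		spinlock_t lock;	/* initialized elsewhere at boot */
		unsigned int size;
	};
	static DEFINE_PER_CPU(struct pcpu_state, pcpu_state);

	static void update_old(void)
	{
		struct pcpu_state *s = per_cpu_ptr(&pcpu_state, get_cpu());
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		s->size++;
		spin_unlock_irqrestore(&s->lock, flags);
		put_cpu();		/* re-enable preemption */
	}

	static void update_new(void)
	{
		/* the lock, not disabled preemption, provides exclusion */
		struct pcpu_state *s = raw_cpu_ptr(&pcpu_state);
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		s->size++;
		spin_unlock_irqrestore(&s->lock, flags);
	}
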
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
return !dma_addr;
}
-struct dma_map_ops intel_dma_ops = {
+const struct dma_map_ops intel_dma_ops = {
.alloc = intel_alloc_coherent,
.free = intel_free_coherent,
.map_sg = intel_map_sg,
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.mapping_error = intel_mapping_error,
+#ifdef CONFIG_X86
+ .dma_supported = x86_dma_supported,
+#endif
};
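
intel_dma_ops is never written after build time, so it can be const-qualified and land in .rodata; with rodata protection enabled, a stray write then faults instead of silently redirecting DMA callbacks. The added .dma_supported hook delegates the DMA-mask check to the shared x86 helper rather than an arch-level override. The constified-ops pattern in miniature (my_ops and my_map_page are hypothetical):

	static dma_addr_t my_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir,
				      unsigned long attrs)
	{
		return page_to_phys(page) + offset;	/* placeholder 1:1 map */
	}

	/* const: the method table is read-only after link time */
	static const struct dma_map_ops my_ops = {
		.map_page = my_map_page,
	};
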
static inline int iommu_domain_cache_init(void)