diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 5f3b68c26b839de6feea48ef9cf8820aa4e972ba..37e708fdbb5aac0bb66ac0cf17db030aa30a3a1e 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -63,11 +63,21 @@ struct gart_device {
        struct device           *dev;
 };
 
+struct gart_domain {
+       struct iommu_domain domain;             /* generic domain handle */
+       struct gart_device *gart;               /* link to gart device   */
+};
+
 static struct gart_device *gart_handle; /* unique for a system */
 
 #define GART_PTE(_pfn)                                         \
        (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
+static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
+{
+       return container_of(dom, struct gart_domain, domain);
+}
+
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
@@ -156,7 +166,8 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        struct gart_client *client, *c;
        int err = 0;
 
@@ -188,7 +199,8 @@ fail:
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        struct gart_client *c;
 
        spin_lock(&gart->client_lock);
@@ -206,46 +218,55 @@ out:
        spin_unlock(&gart->client_lock);
 }
 
-static int gart_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
+       struct gart_domain *gart_domain;
        struct gart_device *gart;
 
+       if (type != IOMMU_DOMAIN_UNMANAGED)
+               return NULL;
+
        gart = gart_handle;
        if (!gart)
-               return -EINVAL;
+               return NULL;
 
-       domain->priv = gart;
+       gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
+       if (!gart_domain)
+               return NULL;
 
-       domain->geometry.aperture_start = gart->iovmm_base;
-       domain->geometry.aperture_end = gart->iovmm_base +
+       gart_domain->gart = gart;
+       gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
+       gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
                                        gart->page_count * GART_PAGE_SIZE - 1;
-       domain->geometry.force_aperture = true;
+       gart_domain->domain.geometry.force_aperture = true;
 
-       return 0;
+       return &gart_domain->domain;
 }
 
-static void gart_iommu_domain_destroy(struct iommu_domain *domain)
+static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-       struct gart_device *gart = domain->priv;
-
-       if (!gart)
-               return;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
 
-       spin_lock(&gart->client_lock);
-       if (!list_empty(&gart->client)) {
-               struct gart_client *c;
+       if (gart) {
+               spin_lock(&gart->client_lock);
+               if (!list_empty(&gart->client)) {
+                       struct gart_client *c;
 
-               list_for_each_entry(c, &gart->client, list)
-                       gart_iommu_detach_dev(domain, c->dev);
+                       list_for_each_entry(c, &gart->client, list)
+                               gart_iommu_detach_dev(domain, c->dev);
+               }
+               spin_unlock(&gart->client_lock);
        }
-       spin_unlock(&gart->client_lock);
-       domain->priv = NULL;
+
+       kfree(gart_domain);
 }
 
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t pa, size_t bytes, int prot)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        unsigned long flags;
        unsigned long pfn;
 
@@ -268,7 +289,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t bytes)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        unsigned long flags;
 
        if (!gart_iova_range_valid(gart, iova, bytes))
@@ -284,7 +306,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        unsigned long pte;
        phys_addr_t pa;
        unsigned long flags;
@@ -313,8 +336,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
 
 static const struct iommu_ops gart_iommu_ops = {
        .capable        = gart_iommu_capable,
-       .domain_init    = gart_iommu_domain_init,
-       .domain_destroy = gart_iommu_domain_destroy,
+       .domain_alloc   = gart_iommu_domain_alloc,
+       .domain_free    = gart_iommu_domain_free,
        .attach_dev     = gart_iommu_attach_dev,
        .detach_dev     = gart_iommu_detach_dev,
        .map            = gart_iommu_map,
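
The hunks above introduce struct gart_domain, which embeds the generic struct iommu_domain and carries a back-pointer to the gart_device; to_gart_domain() recovers the wrapper from the embedded handle with container_of(), so domain_alloc() can hand the core a pointer into driver-private storage and domain_free() can release it again. The following stand-alone sketch (not part of the patch) illustrates only that embed-and-recover pattern; struct handle and struct wrapper are hypothetical userspace stand-ins, not kernel types.

        /*
         * Illustrative sketch of the container_of pattern used by the patch.
         * "struct handle" stands in for struct iommu_domain and
         * "struct wrapper" for struct gart_domain; both are made up here.
         */
        #include <stddef.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct handle {                 /* generic handle seen by callers */
                unsigned long aperture_start;
        };

        struct wrapper {                /* private wrapper owned by the driver */
                struct handle handle;
                int private_state;
        };

        static struct handle *wrapper_alloc(void)
        {
                struct wrapper *w = calloc(1, sizeof(*w));

                if (!w)
                        return NULL;
                w->private_state = 42;
                return &w->handle;      /* caller only ever sees the embedded handle */
        }

        static void wrapper_free(struct handle *h)
        {
                /* convert the embedded handle back to the wrapper and free it */
                free(container_of(h, struct wrapper, handle));
        }

        int main(void)
        {
                struct handle *h = wrapper_alloc();

                if (h) {
                        printf("state = %d\n",
                               container_of(h, struct wrapper, handle)->private_state);
                        wrapper_free(h);
                }
                return 0;
        }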