drm/i915: Split GEN6 PPGTT initialization up
author     Ben Widawsky <benjamin.widawsky@intel.com>
           Thu, 20 Feb 2014 06:05:49 +0000 (22:05 -0800)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>
           Wed, 5 Mar 2014 20:30:00 +0000 (21:30 +0100)
To match the GEN8 style of PPGTT initialization, split up the
allocations and mappings. Unlike GEN8, we skip a separate dma_addr_t
allocation function, as it is much simpler pre-gen8.

With this code it would be easy to make a more general PPGTT
initialization function with per-GEN alloc/map/etc. hooks, or to use a
common helper, similar to the ringbuffer code. I don't see a benefit to
doing this just yet, but who knows...

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
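
For orientation, a condensed sketch of the call structure this patch
introduces, reconstructed from the diff below (error handling trimmed;
not the literal patch):

    static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
    {
            int ret;

            /* per-GEN enable/switch_mm selection stays here, unchanged */

            /* reserve the PD node in the GGTT, allocate the page-table
             * pages and the pt_dma_addr array */
            ret = gen6_ppgtt_alloc(ppgtt);
            if (ret)
                    return ret;

            /* pci_map_page() each page table, recording pt_dma_addr[i] */
            ret = gen6_ppgtt_setup_page_tables(ppgtt);
            if (ret) {
                    gen6_ppgtt_free(ppgtt);
                    return ret;
            }

            /* vm ops, pd_offset, clear_range() and debug output follow */
            return 0;
    }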
drivers/gpu/drm/i915/i915_gem_gtt.c

index 6b027f5cbd9a7aacfc0f477e03ef0dc319676fc1..f16c6ae2df63782a6d68d1d5a0ea4f944d91b707 100644
@@ -1036,14 +1036,14 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
        gen6_ppgtt_free(ppgtt);
 }
 
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
 #define GEN6_PD_ALIGN (PAGE_SIZE * 16)
 #define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool retried = false;
-       int i, ret;
+       int ret;
 
        /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
         * allocator works in address space sizes, so it's multiplied by page
@@ -1070,42 +1070,60 @@ alloc:
        if (ppgtt->node.start < dev_priv->gtt.mappable_end)
                DRM_DEBUG("Forced to use aperture for PDEs\n");
 
-       ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
        ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
-       if (IS_GEN6(dev)) {
-               ppgtt->enable = gen6_ppgtt_enable;
-               ppgtt->switch_mm = gen6_mm_switch;
-       } else if (IS_HASWELL(dev)) {
-               ppgtt->enable = gen7_ppgtt_enable;
-               ppgtt->switch_mm = hsw_mm_switch;
-       } else if (IS_GEN7(dev)) {
-               ppgtt->enable = gen7_ppgtt_enable;
-               ppgtt->switch_mm = gen7_mm_switch;
-       } else
-               BUG();
-       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
-       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
-       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
-       ppgtt->base.start = 0;
-       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+       return ret;
+}
+
+static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
-       if (!ppgtt->pt_pages) {
-               drm_mm_remove_node(&ppgtt->node);
+
+       if (!ppgtt->pt_pages)
                return -ENOMEM;
-       }
 
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
-               if (!ppgtt->pt_pages[i])
-                       goto err_pt_alloc;
+               if (!ppgtt->pt_pages[i]) {
+                       gen6_ppgtt_free(ppgtt);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+{
+       int ret;
+
+       ret = gen6_ppgtt_allocate_page_directories(ppgtt);
+       if (ret)
+               return ret;
+
+       ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+       if (ret) {
+               drm_mm_remove_node(&ppgtt->node);
+               return ret;
        }
 
        ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
                                     GFP_KERNEL);
-       if (!ppgtt->pt_dma_addr)
-               goto err_pt_alloc;
+       if (!ppgtt->pt_dma_addr) {
+               drm_mm_remove_node(&ppgtt->node);
+               gen6_ppgtt_free(ppgtt);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       int i;
 
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;
@@ -1114,40 +1132,63 @@ alloc:
                                       PCI_DMA_BIDIRECTIONAL);
 
                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
-                       ret = -EIO;
-                       goto err_pd_pin;
-
+                       gen6_ppgtt_unmap_pages(ppgtt);
+                       return -EIO;
                }
+
                ppgtt->pt_dma_addr[i] = pt_addr;
        }
 
-       ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+       return 0;
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+       if (IS_GEN6(dev)) {
+               ppgtt->enable = gen6_ppgtt_enable;
+               ppgtt->switch_mm = gen6_mm_switch;
+       } else if (IS_HASWELL(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = hsw_mm_switch;
+       } else if (IS_GEN7(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = gen7_mm_switch;
+       } else
+               BUG();
+
+       ret = gen6_ppgtt_alloc(ppgtt);
+       if (ret)
+               return ret;
+
+       ret = gen6_ppgtt_setup_page_tables(ppgtt);
+       if (ret) {
+               gen6_ppgtt_free(ppgtt);
+               return ret;
+       }
+
+       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
        ppgtt->debug_dump = gen6_dump_ppgtt;
 
-       DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
-                        ppgtt->node.size >> 20,
-                        ppgtt->node.start / PAGE_SIZE);
        ppgtt->pd_offset =
                ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
 
-       return 0;
+       ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
-err_pd_pin:
-       if (ppgtt->pt_dma_addr) {
-               for (i--; i >= 0; i--)
-                       pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
-                                      4096, PCI_DMA_BIDIRECTIONAL);
-       }
-err_pt_alloc:
-       kfree(ppgtt->pt_dma_addr);
-       for (i = 0; i < ppgtt->num_pd_entries; i++) {
-               if (ppgtt->pt_pages[i])
-                       __free_page(ppgtt->pt_pages[i]);
-       }
-       kfree(ppgtt->pt_pages);
-       drm_mm_remove_node(&ppgtt->node);
+       DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+                        ppgtt->node.size >> 20,
+                        ppgtt->node.start / PAGE_SIZE);
 
-       return ret;
+       return 0;
 }
 
 int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)