Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c150dee78a70468f96a87295cd29595cfa46a68..926a1e2dd2349ea0aa5960bd4b0e089225631083 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -44,9 +44,9 @@ typedef uint32_t gtt_pte_t;
 #define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
 #define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
 
-static inline gtt_pte_t pte_encode(struct drm_device *dev,
-                                  dma_addr_t addr,
-                                  enum i915_cache_level level)
+static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+                                       dma_addr_t addr,
+                                       enum i915_cache_level level)
 {
        gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
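
A standalone sketch of the encoding above: a gen6 PTE is the 4 KiB-aligned
page address with the cache-control bits and the valid bit OR'd into the low
bits. The demo_* names and exact bit values are assumptions for illustration,
not taken from this patch.

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_PTE_VALID      (1u << 0)  /* bit 0: entry is valid */
	#define DEMO_PTE_CACHE_LLC  (2u << 1)  /* bits 3:1: cacheability (assumed) */
	#define DEMO_PTE_ADDR(a)    ((uint32_t)(a) & 0xfffff000u)  /* bits 31:12 */

	static uint32_t demo_pte_encode(uint64_t dma_addr)
	{
		return DEMO_PTE_ADDR(dma_addr) | DEMO_PTE_CACHE_LLC | DEMO_PTE_VALID;
	}

	int main(void)
	{
		/* prints pte = 0x12345005: address | LLC bits (0x4) | valid (0x1) */
		printf("pte = 0x%08x\n", (unsigned)demo_pte_encode(0x12345000ull));
		return 0;
	}
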
@@ -77,7 +77,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev,
 }
 
 /* PPGTT support for Sandybridge/Gen6 and later */
-static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
 {
@@ -87,8 +87,9 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
-                                I915_CACHE_LLC);
+       scratch_pte = gen6_pte_encode(ppgtt->dev,
+                                     ppgtt->scratch_page_dma_addr,
+                                     I915_CACHE_LLC);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -108,10 +109,72 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
        }
 }
 
-int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+                                     struct sg_table *pages,
+                                     unsigned first_entry,
+                                     enum i915_cache_level cache_level)
 {
+       gtt_pte_t *pt_vaddr;
+       unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+       unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+       unsigned i, j, m, segment_len;
+       dma_addr_t page_addr;
+       struct scatterlist *sg;
+
+       /* init sg walking */
+       sg = pages->sgl;
+       i = 0;
+       segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+       m = 0;
+
+       while (i < pages->nents) {
+               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+               for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+                       page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+                       pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
+                                                     cache_level);
+
+                       /* grab the next page */
+                       if (++m == segment_len) {
+                               if (++i == pages->nents)
+                                       break;
+
+                               sg = sg_next(sg);
+                               segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+                               m = 0;
+                       }
+               }
+
+               kunmap_atomic(pt_vaddr);
+
+               first_pte = 0;
+               act_pd++;
+       }
+}
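
The loop above advances two cursors over the scatterlist: m counts 4 KiB pages
inside the current DMA segment, i counts segments, and the walk spills into
the next segment once the current one is exhausted. A self-contained userspace
rendition of the same walk over made-up segments (demo_* names are invented):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_PAGE_SHIFT 12

	struct demo_seg {               /* stand-in for one scatterlist entry */
		uint64_t dma_addr;      /* segment start, page aligned */
		unsigned int pages;     /* sg_dma_len() >> PAGE_SHIFT */
	};

	static void demo_walk(const struct demo_seg *segs, unsigned int nents)
	{
		unsigned int i = 0, m = 0;

		while (i < nents) {
			uint64_t page_addr = segs[i].dma_addr +
					     ((uint64_t)m << DEMO_PAGE_SHIFT);
			printf("encode pte for page at 0x%llx\n",
			       (unsigned long long)page_addr);

			if (++m == segs[i].pages) {  /* segment done: next one */
				i++;
				m = 0;
			}
		}
	}

	int main(void)
	{
		const struct demo_seg segs[] = {
			{ 0x100000, 2 },        /* two pages */
			{ 0x400000, 1 },        /* one page  */
		};

		demo_walk(segs, 2);
		return 0;
	}
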
+
+static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+       int i;
+
+       if (ppgtt->pt_dma_addr) {
+               for (i = 0; i < ppgtt->num_pd_entries; i++)
+                       pci_unmap_page(ppgtt->dev->pdev,
+                                      ppgtt->pt_dma_addr[i],
+                                      4096, PCI_DMA_BIDIRECTIONAL);
+       }
+
+       kfree(ppgtt->pt_dma_addr);
+       for (i = 0; i < ppgtt->num_pd_entries; i++)
+               __free_page(ppgtt->pt_pages[i]);
+       kfree(ppgtt->pt_pages);
+       kfree(ppgtt);
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_device *dev = ppgtt->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;
@@ -119,18 +182,17 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
-       first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
-
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return ret;
+       first_pd_entry_in_global_pt =
+               gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
 
-       ppgtt->dev = dev;
        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+       ppgtt->clear_range = gen6_ppgtt_clear_range;
+       ppgtt->insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->cleanup = gen6_ppgtt_cleanup;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
-               goto err_ppgtt;
+               return -ENOMEM;
 
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
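
(To make the "steal them at the end" comment above concrete, using the
driver's #defined constants rather than anything in this hunk: the maximal
2 MiB gen6 global GTT holds 512*1024 = 524288 PTE slots; with
I915_PPGTT_PD_ENTRIES = 512, first_pd_entry_in_global_pt = 524288 - 512 =
523776. Each of those 512 page tables carries 1024 PTEs, so the PPGTT can
address 512 * 1024 * 4 KiB = 2 GiB.)
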
@@ -138,39 +200,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
                        goto err_pt_alloc;
        }
 
-       if (dev_priv->mm.gtt->needs_dmar) {
-               ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
-                                               *ppgtt->num_pd_entries,
-                                            GFP_KERNEL);
-               if (!ppgtt->pt_dma_addr)
-                       goto err_pt_alloc;
+       ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
+                                    GFP_KERNEL);
+       if (!ppgtt->pt_dma_addr)
+               goto err_pt_alloc;
 
-               for (i = 0; i < ppgtt->num_pd_entries; i++) {
-                       dma_addr_t pt_addr;
+       for (i = 0; i < ppgtt->num_pd_entries; i++) {
+               dma_addr_t pt_addr;
 
-                       pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
-                                              0, 4096,
-                                              PCI_DMA_BIDIRECTIONAL);
+               pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+                                      PCI_DMA_BIDIRECTIONAL);
 
-                       if (pci_dma_mapping_error(dev->pdev,
-                                                 pt_addr)) {
-                               ret = -EIO;
-                               goto err_pd_pin;
+               if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+                       ret = -EIO;
+                       goto err_pd_pin;
-
-                       }
-                       ppgtt->pt_dma_addr[i] = pt_addr;
                }
+               ppgtt->pt_dma_addr[i] = pt_addr;
        }
 
-       ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+       ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
 
-       i915_ppgtt_clear_range(ppgtt, 0,
-                              ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+       ppgtt->clear_range(ppgtt, 0,
+                          ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
        ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
 
-       dev_priv->mm.aliasing_ppgtt = ppgtt;
-
        return 0;
 
 err_pd_pin:
@@ -186,94 +241,57 @@ err_pt_alloc:
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);
-err_ppgtt:
-       kfree(ppgtt);
 
        return ret;
 }
 
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       int i;
+       struct i915_hw_ppgtt *ppgtt;
+       int ret;
 
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
-               return;
+               return -ENOMEM;
 
-       if (ppgtt->pt_dma_addr) {
-               for (i = 0; i < ppgtt->num_pd_entries; i++)
-                       pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
-                                      4096, PCI_DMA_BIDIRECTIONAL);
-       }
+       ppgtt->dev = dev;
 
-       kfree(ppgtt->pt_dma_addr);
-       for (i = 0; i < ppgtt->num_pd_entries; i++)
-               __free_page(ppgtt->pt_pages[i]);
-       kfree(ppgtt->pt_pages);
-       kfree(ppgtt);
+       ret = gen6_ppgtt_init(ppgtt);
+       if (ret)
+               kfree(ppgtt);
+       else
+               dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+       return ret;
 }
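
i915_gem_init_aliasing_ppgtt() above now only allocates and delegates; the
generation-specific behaviour hangs off function pointers that
gen6_ppgtt_init() filled in. A minimal sketch of that ops-pointer pattern
(demo_* names are invented, not the driver's types):

	#include <stdio.h>

	struct demo_ppgtt {
		void (*clear_range)(struct demo_ppgtt *ppgtt,
				    unsigned int first, unsigned int num);
		void (*cleanup)(struct demo_ppgtt *ppgtt);
	};

	static void gen6_demo_clear_range(struct demo_ppgtt *ppgtt,
					  unsigned int first, unsigned int num)
	{
		printf("gen6: clear %u entries from %u\n", num, first);
	}

	static void gen6_demo_cleanup(struct demo_ppgtt *ppgtt)
	{
		printf("gen6: cleanup\n");
	}

	static void demo_init(struct demo_ppgtt *ppgtt)
	{
		/* generation-specific hooks chosen once; callers stay generic */
		ppgtt->clear_range = gen6_demo_clear_range;
		ppgtt->cleanup = gen6_demo_cleanup;
	}

	int main(void)
	{
		struct demo_ppgtt ppgtt;

		demo_init(&ppgtt);
		ppgtt.clear_range(&ppgtt, 0, 1024);  /* dispatches to gen6 variant */
		ppgtt.cleanup(&ppgtt);
		return 0;
	}
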
 
-static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
-                                        const struct sg_table *pages,
-                                        unsigned first_entry,
-                                        enum i915_cache_level cache_level)
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 {
-       gtt_pte_t *pt_vaddr;
-       unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
-       unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-       unsigned i, j, m, segment_len;
-       dma_addr_t page_addr;
-       struct scatterlist *sg;
-
-       /* init sg walking */
-       sg = pages->sgl;
-       i = 0;
-       segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-       m = 0;
-
-       while (i < pages->nents) {
-               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
-               for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
-                       page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                       pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
-                                                cache_level);
-
-                       /* grab the next page */
-                       if (++m == segment_len) {
-                               if (++i == pages->nents)
-                                       break;
-
-                               sg = sg_next(sg);
-                               segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-                               m = 0;
-                       }
-               }
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
-               kunmap_atomic(pt_vaddr);
+       if (!ppgtt)
+               return;
 
-               first_pte = 0;
-               act_pd++;
-       }
+       ppgtt->cleanup(ppgtt);
 }
 
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
 {
-       i915_ppgtt_insert_sg_entries(ppgtt,
-                                    obj->pages,
-                                    obj->gtt_space->start >> PAGE_SHIFT,
-                                    cache_level);
+       ppgtt->insert_entries(ppgtt, obj->pages,
+                             obj->gtt_space->start >> PAGE_SHIFT,
+                             cache_level);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
 {
-       i915_ppgtt_clear_range(ppgtt,
-                              obj->gtt_space->start >> PAGE_SHIFT,
-                              obj->base.size >> PAGE_SHIFT);
+       ppgtt->clear_range(ppgtt,
+                          obj->gtt_space->start >> PAGE_SHIFT,
+                          obj->base.size >> PAGE_SHIFT);
 }
 
 void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -282,7 +300,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       uint32_t __iomem *pd_addr;
+       gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;
 
@@ -290,15 +308,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
                return;
 
 
-       pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+       pd_addr = (gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+               ppgtt->pd_offset / sizeof(gtt_pte_t);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;
 
-               if (dev_priv->mm.gtt->needs_dmar)
-                       pt_addr = ppgtt->pt_dma_addr[i];
-               else
-                       pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
+               pt_addr = ppgtt->pt_dma_addr[i];
                pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
                pd_entry |= GEN6_PDE_VALID;
 
@@ -338,11 +352,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
        }
 }
 
+extern int intel_iommu_gfx_mapped;
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline bool needs_idle_maps(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       /* Query intel_iommu to see if we need the workaround. Presumably that
+        * was loaded first.
+        */
+       if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
+
 static bool do_idling(struct drm_i915_private *dev_priv)
 {
        bool ret = dev_priv->mm.interruptible;
 
-       if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
+       if (unlikely(dev_priv->gtt.do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
@@ -356,45 +386,18 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 {
-       if (unlikely(dev_priv->mm.gtt->do_idle_maps))
+       if (unlikely(dev_priv->gtt.do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
 }
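
do_idling()/undo_idling() form a save/restore bracket around GTT unmaps for
the Gen5 VT-d workaround above. A sketch of the intended calling pattern
(demo_* types are invented; the real caller sits in the unbind path):

	#include <stdbool.h>
	#include <stdio.h>

	struct demo_priv {
		bool interruptible;
		bool do_idle_maps;
	};

	static bool demo_do_idling(struct demo_priv *p)
	{
		bool was_interruptible = p->interruptible;

		if (p->do_idle_maps) {
			p->interruptible = false;
			puts("idling GPU before unmap (VT-d workaround)");
		}
		return was_interruptible;
	}

	static void demo_undo_idling(struct demo_priv *p, bool was_interruptible)
	{
		if (p->do_idle_maps)
			p->interruptible = was_interruptible;
	}

	int main(void)
	{
		struct demo_priv p = { .interruptible = true, .do_idle_maps = true };
		bool saved = demo_do_idling(&p);

		puts("unmapping pages from the GTT ...");
		demo_undo_idling(&p, saved);
		return 0;
	}
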
 
-
-static void i915_ggtt_clear_range(struct drm_device *dev,
-                                unsigned first_entry,
-                                unsigned num_entries)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       gtt_pte_t scratch_pte;
-       gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
-       const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
-       int i;
-
-       if (INTEL_INFO(dev)->gen < 6) {
-               intel_gtt_clear_range(first_entry, num_entries);
-               return;
-       }
-
-       if (WARN(num_entries > max_entries,
-                "First entry = %d; Num entries = %d (max=%d)\n",
-                first_entry, num_entries, max_entries))
-               num_entries = max_entries;
-
-       scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
-       for (i = 0; i < num_entries; i++)
-               iowrite32(scratch_pte, &gtt_base[i]);
-       readl(gtt_base);
-}
-
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
        /* First fill our portion of the GTT with scratch pages */
-       i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
-                             (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+       dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
+                                     dev_priv->gtt.total / PAGE_SIZE);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                i915_gem_clflush_object(obj);
@@ -423,16 +426,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
  * within the global GTT as well as accessible by the GPU through the GMADR
  * mapped BAR (dev_priv->mm.gtt->gtt).
  */
-static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
-                                 enum i915_cache_level level)
+static void gen6_ggtt_insert_entries(struct drm_device *dev,
+                                    struct sg_table *st,
+                                    unsigned int first_entry,
+                                    enum i915_cache_level level)
 {
-       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct sg_table *st = obj->pages;
        struct scatterlist *sg = st->sgl;
-       const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
-       const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
-       gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+       gtt_pte_t __iomem *gtt_entries =
+               (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int unused, i = 0;
        unsigned int len, m = 0;
        dma_addr_t addr;
@@ -441,14 +443,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                       iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+                       iowrite32(gen6_pte_encode(dev, addr, level),
+                                 &gtt_entries[i]);
                        i++;
                }
        }
 
-       BUG_ON(i > max_entries);
-       BUG_ON(i != obj->base.size / PAGE_SIZE);
-
        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR that they are potentially
@@ -456,7 +456,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
         * hardware should work, we must keep this posting read for paranoia.
         */
        if (i != 0)
-               WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+               WARN_ON(readl(&gtt_entries[i-1])
+                       != gen6_pte_encode(dev, addr, level));
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -466,28 +467,70 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
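
The readl() guard in the comment above is the classic posting-read idiom:
MMIO writes through write-combined space may be posted (buffered), and a read
back from the same BAR forces them to complete before the TLB flush that
follows. A userspace mock of the idiom (demo_* stand-ins, not real MMIO):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_bar[4];    /* pretend GTT inside a PCI BAR */

	static void demo_iowrite32(uint32_t val, volatile uint32_t *addr)
	{
		*addr = val;            /* may sit in a write buffer on real MMIO */
	}

	static uint32_t demo_readl(volatile uint32_t *addr)
	{
		return *addr;           /* read forces prior posted writes to land */
	}

	int main(void)
	{
		volatile uint32_t *gtt_base = fake_bar;
		uint32_t scratch_pte = 0x1005;
		int i;

		for (i = 0; i < 4; i++)
			demo_iowrite32(scratch_pte, &gtt_base[i]);

		/* posting read: do not proceed (e.g. to a TLB flush) until the
		 * writes above are visible to the device */
		(void)demo_readl(gtt_base);
		printf("last pte = 0x%08x\n", (unsigned)gtt_base[3]);
		return 0;
	}
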
 
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+                                 unsigned int first_entry,
+                                 unsigned int num_entries)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       gtt_pte_t scratch_pte;
+       gtt_pte_t __iomem *gtt_base =
+               (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+       const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+       int i;
+
+       if (WARN(num_entries > max_entries,
+                "First entry = %d; Num entries = %d (max=%d)\n",
+                first_entry, num_entries, max_entries))
+               num_entries = max_entries;
+
+       scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
+                                     I915_CACHE_LLC);
+       for (i = 0; i < num_entries; i++)
+               iowrite32(scratch_pte, &gtt_base[i]);
+       readl(gtt_base);
+}
+
+static void i915_ggtt_insert_entries(struct drm_device *dev,
+                                    struct sg_table *st,
+                                    unsigned int pg_start,
+                                    enum i915_cache_level cache_level)
+{
+       unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+               AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+       intel_gtt_insert_sg_entries(st, pg_start, flags);
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+                                 unsigned int first_entry,
+                                 unsigned int num_entries)
+{
+       intel_gtt_clear_range(first_entry, num_entries);
+}
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       if (INTEL_INFO(dev)->gen < 6) {
-               unsigned int flags = (cache_level == I915_CACHE_NONE) ?
-                       AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-               intel_gtt_insert_sg_entries(obj->pages,
-                                           obj->gtt_space->start >> PAGE_SHIFT,
-                                           flags);
-       } else {
-               gen6_ggtt_bind_object(obj, cache_level);
-       }
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
+                                        obj->gtt_space->start >> PAGE_SHIFT,
+                                        cache_level);
 
        obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-       i915_ggtt_clear_range(obj->base.dev,
-                             obj->gtt_space->start >> PAGE_SHIFT,
-                             obj->base.size >> PAGE_SHIFT);
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->gtt.gtt_clear_range(dev,
+                                     obj->gtt_space->start >> PAGE_SHIFT,
+                                     obj->base.size >> PAGE_SHIFT);
 
        obj->has_global_gtt_mapping = 0;
 }
@@ -524,27 +567,101 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
                        *end -= 4096;
        }
 }
 
-void i915_gem_init_global_gtt(struct drm_device *dev,
-                             unsigned long start,
-                             unsigned long mappable_end,
-                             unsigned long end)
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+                              unsigned long start,
+                              unsigned long mappable_end,
+                              unsigned long end)
 {
+       /* Let GEM manage all of the aperture.
+        *
+        * However, leave one page at the end still bound to the scratch page.
+        * There are a number of places where the hardware apparently prefetches
+        * past the end of the object, and we've seen multiple hangs with the
+        * GPU head pointer stuck in a batchbuffer bound at the last page of the
+        * aperture.  One page should be enough to keep any prefetching inside
+        * of the aperture.
+        */
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_mm_node *entry;
+       struct drm_i915_gem_object *obj;
+       unsigned long hole_start, hole_end;
 
-       /* Substract the guard page ... */
+       BUG_ON(mappable_end > end);
+
+       /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
-       dev_priv->mm.gtt_start = start;
-       dev_priv->mm.gtt_mappable_end = mappable_end;
-       dev_priv->mm.gtt_end = end;
-       dev_priv->mm.gtt_total = end - start;
-       dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+       /* Mark any preallocated objects as occupied */
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+               DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+                             obj->gtt_offset, obj->base.size);
+
+               BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+                                                    obj->gtt_offset,
+                                                    obj->base.size,
+                                                    false);
+               obj->has_global_gtt_mapping = 1;
+       }
+
+       dev_priv->gtt.start = start;
+       dev_priv->gtt.total = end - start;
+
+       /* Clear any non-preallocated blocks */
+       drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+                            hole_start, hole_end) {
+               DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+                             hole_start, hole_end);
+               dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
+                                             (hole_end-hole_start) / PAGE_SIZE);
+       }
 
-       /* ... but ensure that we clear the entire range. */
-       i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+       /* And finally clear the reserved guard page */
+       dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+}
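
The guard-page bookkeeping above is worth making concrete: the drm_mm manager
is sized one page short of the range, and the final GTT entry is then pointed
back at the scratch page. A worked example with hypothetical numbers:

	#include <stdio.h>

	#define DEMO_PAGE_SIZE 4096ul

	int main(void)
	{
		unsigned long start = 0, end = 256ul << 20;  /* 256 MiB GTT */

		/* manager covers everything except the last page ... */
		unsigned long managed = end - start - DEMO_PAGE_SIZE;
		/* ... and the guard page is the final GTT entry */
		unsigned long guard_entry = end / DEMO_PAGE_SIZE - 1;

		printf("managed bytes = %lu, guard PTE index = %lu\n",
		       managed, guard_entry);
		return 0;
	}
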
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+       if (i915_enable_ppgtt >= 0)
+               return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+       /* Disable ppgtt on SNB if VT-d is on. */
+       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+               return false;
+#endif
+
+       return true;
+}
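
intel_enable_ppgtt() follows the usual tri-state module-option convention:
a negative value means auto-detect, 0/1 force the feature off/on. A compact
sketch (enable_ppgtt stands in for the i915_enable_ppgtt parameter):

	#include <stdbool.h>
	#include <stdio.h>

	static int enable_ppgtt = -1;   /* -1 = auto, 0 = force off, 1 = force on */

	static bool demo_intel_enable_ppgtt(bool is_gen6, bool vtd_active)
	{
		if (enable_ppgtt >= 0)          /* explicit user override wins */
			return enable_ppgtt;

		if (is_gen6 && vtd_active)      /* auto: known-bad combination */
			return false;

		return true;
	}

	int main(void)
	{
		printf("gen6 + VT-d, auto: %d\n",
		       demo_intel_enable_ppgtt(true, true));
		enable_ppgtt = 1;
		printf("gen6 + VT-d, forced on: %d\n",
		       demo_intel_enable_ppgtt(true, true));
		return 0;
	}
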
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long gtt_size, mappable_size;
+
+       gtt_size = dev_priv->gtt.total;
+       mappable_size = dev_priv->gtt.mappable_end;
+
+       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+               int ret;
+               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+                * aperture accordingly when using aliasing ppgtt. */
+               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+               i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+               ret = i915_gem_init_aliasing_ppgtt(dev);
+               if (!ret)
+                       return;
+
+               DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
+               drm_mm_takedown(&dev_priv->mm.gtt_space);
+               gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+       }
+       i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
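
(Concretely, assuming the driver's constants: I915_PPGTT_PD_ENTRIES *
PAGE_SIZE = 512 * 4096 = 2 MiB, so an enabled aliasing PPGTT costs the global
GTT its top 2 MiB; if PPGTT setup then fails, the drm_mm is torn down and
re-created over the full range.)
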
 
 static int setup_scratch_page(struct drm_device *dev)
@@ -567,8 +684,8 @@ static int setup_scratch_page(struct drm_device *dev)
 #else
        dma_addr = page_to_phys(page);
 #endif
-       dev_priv->mm.gtt->scratch_page = page;
-       dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+       dev_priv->gtt.scratch_page = page;
+       dev_priv->gtt.scratch_page_dma = dma_addr;
 
        return 0;
 }
@@ -576,11 +693,11 @@ static int setup_scratch_page(struct drm_device *dev)
 static void teardown_scratch_page(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
-       pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+       set_pages_wb(dev_priv->gtt.scratch_page, 1);
+       pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       put_page(dev_priv->mm.gtt->scratch_page);
-       __free_page(dev_priv->mm.gtt->scratch_page);
+       put_page(dev_priv->gtt.scratch_page);
+       __free_page(dev_priv->gtt.scratch_page);
 }
 
 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -590,14 +707,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
        return snb_gmch_ctl << 20;
 }
 
-static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
 {
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
-static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
 {
        static const int stolen_decoder[] = {
                0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
@@ -606,103 +723,127 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
        return stolen_decoder[snb_gmch_ctl] << 20;
 }
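
Both helpers above decode fields of the single 16-bit GMCH control word:
gen6 stolen memory is a 5-bit field in 32 MiB units, gen7 indexes a lookup
table, and the GTT size is a separate 2-bit field in 1 MiB units. A standalone
decode, with the shift/mask values assumed from the SNB_GMCH_* macros:

	#include <stdint.h>
	#include <stdio.h>

	/* field positions assumed from the driver's SNB_GMCH_* macros */
	#define DEMO_GMS_SHIFT  3       /* graphics mode select (stolen size) */
	#define DEMO_GMS_MASK   0x1f
	#define DEMO_GGMS_SHIFT 8       /* GTT graphics memory size */
	#define DEMO_GGMS_MASK  0x3

	static unsigned long demo_gen6_stolen_size(uint16_t gmch_ctl)
	{
		gmch_ctl >>= DEMO_GMS_SHIFT;
		gmch_ctl &= DEMO_GMS_MASK;
		return (unsigned long)gmch_ctl << 25;   /* 32 MiB units */
	}

	static unsigned long demo_gen6_gtt_size(uint16_t gmch_ctl)
	{
		gmch_ctl >>= DEMO_GGMS_SHIFT;
		gmch_ctl &= DEMO_GGMS_MASK;
		return (unsigned long)gmch_ctl << 20;   /* 1 MiB units */
	}

	int main(void)
	{
		uint16_t ctl = (2 << DEMO_GGMS_SHIFT) | (1 << DEMO_GMS_SHIFT);

		/* prints: gtt = 2 MiB, stolen = 32 MiB */
		printf("gtt = %lu MiB, stolen = %lu MiB\n",
		       demo_gen6_gtt_size(ctl) >> 20,
		       demo_gen6_stolen_size(ctl) >> 20);
		return 0;
	}
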
 
-int i915_gem_gtt_init(struct drm_device *dev)
+static int gen6_gmch_probe(struct drm_device *dev,
+                          size_t *gtt_total,
+                          size_t *stolen,
+                          phys_addr_t *mappable_base,
+                          unsigned long *mappable_end)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        phys_addr_t gtt_bus_addr;
+       unsigned int gtt_size;
        u16 snb_gmch_ctl;
        int ret;
 
-       /* On modern platforms we need not worry ourself with the legacy
-        * hostbridge query stuff. Skip it entirely
-        */
-       if (INTEL_INFO(dev)->gen < 6) {
-               ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
-               if (!ret) {
-                       DRM_ERROR("failed to set up gmch\n");
-                       return -EIO;
-               }
+       *mappable_base = pci_resource_start(dev->pdev, 2);
+       *mappable_end = pci_resource_len(dev->pdev, 2);
 
-               dev_priv->mm.gtt = intel_gtt_get();
-               if (!dev_priv->mm.gtt) {
-                       DRM_ERROR("Failed to initialize GTT\n");
-                       intel_gmch_remove();
-                       return -ENODEV;
-               }
-               return 0;
+       /* 64/512MB is the current min/max we actually know of, but this is just
+        * a coarse sanity check.
+        */
+       if (*mappable_end < (64<<20) || *mappable_end > (512<<20)) {
+               DRM_ERROR("Unknown GMADR size (%lx)\n",
+                         dev_priv->gtt.mappable_end);
+               return -ENXIO;
        }
 
-       dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
-       if (!dev_priv->mm.gtt)
-               return -ENOMEM;
-
        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+       pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+       gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-#ifdef CONFIG_INTEL_IOMMU
-       dev_priv->mm.gtt->needs_dmar = 1;
-#endif
+       if (IS_GEN7(dev))
+               *stolen = gen7_get_stolen_size(snb_gmch_ctl);
+       else
+               *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+       *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
 
        /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
        gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
-       dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
-
-       /* i9xx_setup */
-       pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-       dev_priv->mm.gtt->gtt_total_entries =
-               gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
-       if (INTEL_INFO(dev)->gen < 7)
-               dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
-       else
-               dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
-
-       dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
-       /* 64/512MB is the current min/max we actually know of, but this is just a
-        * coarse sanity check.
-        */
-       if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
-           dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
-               DRM_ERROR("Unknown GMADR entries (%d)\n",
-                         dev_priv->mm.gtt->gtt_mappable_entries);
-               ret = -ENXIO;
-               goto err_out;
+       dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+       if (!dev_priv->gtt.gsm) {
+               DRM_ERROR("Failed to map the gtt page table\n");
+               return -ENOMEM;
        }
 
        ret = setup_scratch_page(dev);
-       if (ret) {
+       if (ret)
                DRM_ERROR("Scratch setup failed\n");
-               goto err_out;
-       }
 
-       dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
-                                          dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
-       if (!dev_priv->mm.gtt->gtt) {
-               DRM_ERROR("Failed to map the gtt page table\n");
-               teardown_scratch_page(dev);
-               ret = -ENOMEM;
-               goto err_out;
+       dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+       dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
+       return ret;
+}
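
gen6_gmch_probe() leans on the gen6+ BAR layout: BAR2 is the CPU-visible
aperture (GMADR) while the PTE array (the "GSM") sits 2 MiB into BAR0, and
each 32-bit PTE maps one 4 KiB page. The address arithmetic, with made-up BAR
values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical BARs; the real ones come from pci_resource_start() */
		uint64_t bar0 = 0xf0000000ull;      /* registers + GTT PTEs */
		uint64_t bar2 = 0xd0000000ull;      /* GMADR: mappable aperture */
		uint64_t gtt_size = 2ull << 20;     /* PTE array size from GMCH ctl */

		uint64_t gsm = bar0 + (2ull << 20);             /* BAR0 + 2 MiB */
		uint64_t entries = gtt_size / sizeof(uint32_t); /* 32-bit PTEs */
		uint64_t total = entries << 12;     /* one 4 KiB page per PTE */

		printf("map PTEs at 0x%llx: %llu entries, %llu MiB of GTT space; "
		       "aperture at 0x%llx\n",
		       (unsigned long long)gsm, (unsigned long long)entries,
		       (unsigned long long)(total >> 20), (unsigned long long)bar2);
		return 0;
	}
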
+
+static void gen6_gmch_remove(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       iounmap(dev_priv->gtt.gsm);
+       teardown_scratch_page(dev_priv->dev);
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+                          size_t *gtt_total,
+                          size_t *stolen,
+                          phys_addr_t *mappable_base,
+                          unsigned long *mappable_end)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+       if (!ret) {
+               DRM_ERROR("failed to set up gmch\n");
+               return -EIO;
        }
 
-       /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
-       DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
-       DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
-       DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+       intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+
+       dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+       dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+       dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
 
        return 0;
+}
 
-err_out:
-       kfree(dev_priv->mm.gtt);
-       if (INTEL_INFO(dev)->gen < 6)
-               intel_gmch_remove();
-       return ret;
+static void i915_gmch_remove(struct drm_device *dev)
+{
+       intel_gmch_remove();
 }
 
-void i915_gem_gtt_fini(struct drm_device *dev)
+int i915_gem_gtt_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       iounmap(dev_priv->mm.gtt->gtt);
-       teardown_scratch_page(dev);
-       if (INTEL_INFO(dev)->gen < 6)
-               intel_gmch_remove();
-       kfree(dev_priv->mm.gtt);
+       struct i915_gtt *gtt = &dev_priv->gtt;
+       int ret;
+
+       if (INTEL_INFO(dev)->gen <= 5) {
+               dev_priv->gtt.gtt_probe = i915_gmch_probe;
+               dev_priv->gtt.gtt_remove = i915_gmch_remove;
+       } else {
+               dev_priv->gtt.gtt_probe = gen6_gmch_probe;
+               dev_priv->gtt.gtt_remove = gen6_gmch_remove;
+       }
+
+       ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
+                                     &dev_priv->gtt.stolen_size,
+                                     &gtt->mappable_base,
+                                     &gtt->mappable_end);
+       if (ret)
+               return ret;
+
+       /* GMADR is the PCI mmio aperture into the global GTT. */
+       DRM_INFO("Memory usable by graphics device = %zdM\n",
+                dev_priv->gtt.total >> 20);
+       DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
+                        dev_priv->gtt.mappable_end >> 20);
+       DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
+                        dev_priv->gtt.stolen_size >> 20);
+
+       return 0;
 }
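
After this refactor i915_gem_gtt_init() is pure dispatch: pick a probe/remove
pair by generation, then let the probe hook fill in the size fields. A minimal
rendition of that shape (demo_* names invented, not the driver's API):

	#include <stdio.h>

	struct demo_gtt {
		unsigned long total;
		int (*probe)(struct demo_gtt *gtt);
		void (*remove)(struct demo_gtt *gtt);
	};

	static int gen6_demo_probe(struct demo_gtt *gtt)
	{
		gtt->total = 2ul << 30;         /* pretend the GMCH said 2 GiB */
		return 0;
	}

	static void gen6_demo_remove(struct demo_gtt *gtt)
	{
		puts("unmapping GSM, freeing scratch page");
	}

	static int demo_gtt_init(struct demo_gtt *gtt, int gen)
	{
		if (gen <= 5)
			return -1;              /* i915_gmch_* pair would go here */

		gtt->probe = gen6_demo_probe;
		gtt->remove = gen6_demo_remove;
		return gtt->probe(gtt);
	}

	int main(void)
	{
		struct demo_gtt gtt = { 0 };

		if (demo_gtt_init(&gtt, 6) == 0) {
			printf("usable graphics memory = %luM\n", gtt.total >> 20);
			gtt.remove(&gtt);
		}
		return 0;
	}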