drm: merge Linux master into HEAD
author Dave Airlie <airlied@redhat.com>
Sun, 29 Mar 2009 00:22:18 +0000 (20:22 -0400)
committer Dave Airlie <airlied@redhat.com>
Sun, 29 Mar 2009 00:22:18 +0000 (20:22 -0400)
Conflicts:
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_proc.c
drivers/gpu/drm/i915/i915_gem_debugfs.c

drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem.c
include/drm/drm_pciids.h
include/drm/radeon_drm.h

index 60a1b6cb376ad83611ed2798c2d8ee5dbe83baf5,1b699768ccfb5a196ce194bcf96280bd52dd05a9..f0f6c6b93f3a235a4a667bcac27a56f5667b4562
@@@ -72,7 -72,7 +72,7 @@@ int drm_vm_info(struct seq_file *m, voi
  {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
 -      struct drm_map *map;
 +      struct drm_local_map *map;
        struct drm_map_list *r_list;
  
        /* Hardcoded from _DRM_FRAME_BUFFER,
@@@ -94,9 -94,9 +94,9 @@@
                else
                        type = types[map->type];
  
 -              seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
 +              seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s  0x%02x 0x%08lx ",
                           i,
 -                         map->offset,
 +                         (unsigned long long)map->offset,
                           map->size, type, map->flags,
                           (unsigned long) r_list->user_token);
                if (map->mtrr < 0)
@@@ -286,9 -286,9 +286,9 @@@ int drm_vma_info(struct seq_file *m, vo
  #endif
  
        mutex_lock(&dev->struct_mutex);
-       seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
+       seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
                   atomic_read(&dev->vma_count),
-                  high_memory, virt_to_phys(high_memory));
+                  high_memory, (u64)virt_to_phys(high_memory));
  
        list_for_each_entry(pt, &dev->vmalist, head) {
                vma = pt->vma;
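
The drm_info.c hunks above switch the map listing over to struct drm_local_map and widen the offset print to 0x%016llx with an explicit unsigned long long cast, so the format string stays correct now that map offsets can be wider than unsigned long. A minimal user-space sketch of the same printing idiom; the uint64_t stand-in for resource_size_t and the value are illustrative, not taken from the kernel headers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Stand-in for resource_size_t, which may be 32 or 64 bits wide. */
	uint64_t offset = 0xfd000000ULL;

	/* Cast to unsigned long long so "%llx" stays correct on every arch. */
	printf("offset 0x%016llx\n", (unsigned long long)offset);
	return 0;
}
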
index f7510a8f0eb9bfdfaffc91f9b3b590ee73869d6d,186d08159d4871560291e8cf8eccfffd6ebcfa0e..5de573a981cb19d5da7789270bfd0b05bdf8006f
@@@ -35,9 -35,7 +35,9 @@@ static int drm_sysfs_suspend(struct dev
        struct drm_minor *drm_minor = to_drm_minor(dev);
        struct drm_device *drm_dev = drm_minor->dev;
  
 -      if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->suspend)
 +      if (drm_minor->type == DRM_MINOR_LEGACY &&
 +          !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
 +          drm_dev->driver->suspend)
                return drm_dev->driver->suspend(drm_dev, state);
  
        return 0;
@@@ -55,9 -53,7 +55,9 @@@ static int drm_sysfs_resume(struct devi
        struct drm_minor *drm_minor = to_drm_minor(dev);
        struct drm_device *drm_dev = drm_minor->dev;
  
 -      if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->resume)
 +      if (drm_minor->type == DRM_MINOR_LEGACY &&
 +          !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
 +          drm_dev->driver->resume)
                return drm_dev->driver->resume(drm_dev);
  
        return 0;
@@@ -122,6 -118,20 +122,6 @@@ void drm_sysfs_destroy(void
        class_destroy(drm_class);
  }
  
 -static ssize_t show_dri(struct device *device, struct device_attribute *attr,
 -                      char *buf)
 -{
 -      struct drm_minor *drm_minor = to_drm_minor(device);
 -      struct drm_device *drm_dev = drm_minor->dev;
 -      if (drm_dev->driver->dri_library_name)
 -              return drm_dev->driver->dri_library_name(drm_dev, buf);
 -      return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
 -}
 -
 -static struct device_attribute device_attrs[] = {
 -      __ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
 -};
 -
  /**
   * drm_sysfs_device_release - do nothing
   * @dev: Linux device
@@@ -349,8 -359,8 +349,8 @@@ int drm_sysfs_connector_add(struct drm_
        DRM_DEBUG("adding \"%s\" to sysfs\n",
                  drm_get_connector_name(connector));
  
-       snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s",
-                dev->primary->index, drm_get_connector_name(connector));
+       dev_set_name(&connector->kdev, "card%d-%s",
+                    dev->primary->index, drm_get_connector_name(connector));
        ret = device_register(&connector->kdev);
  
        if (ret) {
@@@ -464,6 -474,7 +464,6 @@@ void drm_sysfs_hotplug_event(struct drm
  int drm_sysfs_device_add(struct drm_minor *minor)
  {
        int err;
 -      int i, j;
        char *minor_str;
  
        minor->kdev.parent = &minor->dev->pdev->dev;
                goto err_out;
        }
  
 -      for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
 -              err = device_create_file(&minor->kdev, &device_attrs[i]);
 -              if (err)
 -                      goto err_out_files;
 -      }
 -
        return 0;
  
 -err_out_files:
 -      if (i > 0)
 -              for (j = 0; j < i; j++)
 -                      device_remove_file(&minor->kdev, &device_attrs[j]);
        device_unregister(&minor->kdev);
  err_out:
  
   */
  void drm_sysfs_device_remove(struct drm_minor *minor)
  {
 -      int i;
 -
 -      for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
 -              device_remove_file(&minor->kdev, &device_attrs[i]);
        device_unregister(&minor->kdev);
  }
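
The drm_sysfs.c hunks above do three things: the legacy suspend/resume hooks are skipped when the driver uses kernel modesetting (DRIVER_MODESET), the dri_library_name sysfs attribute is dropped, and the fixed-size kdev.bus_id snprintf is replaced by dev_set_name(), which lets the driver core allocate and store the name. A minimal sketch of that naming call in isolation; the device, index, and suffix here are hypothetical stand-ins, not the connector code itself:

#include <linux/device.h>

static struct device example_kdev;	/* hypothetical device being registered */

static int example_register(int index, const char *suffix)
{
	int ret;

	/* dev_set_name() formats and stores the name; no BUS_ID_SIZE buffer. */
	ret = dev_set_name(&example_kdev, "card%d-%s", index, suffix);
	if (ret)
		return ret;

	return device_register(&example_kdev);
}
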
index 4d9f5c6818ca8ab534d6c1ccf5b8d2a41088d3c6,a818b377e1f73207e6e25c35d09babe0d0b82a1f..85549f615b1f23560f78831c550c3d1747a4358f
@@@ -41,7 -41,6 +41,6 @@@
  int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
        u32 last_acthd = I915_READ(acthd_reg);
                if (ring->space >= n)
                        return 0;
  
-               if (master_priv->sarea_priv)
-                       master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+               if (dev->primary->master) {
+                       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+                       if (master_priv->sarea_priv)
+                               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+               }
  
                if (ring->head != last_head)
                        i = 0;
@@@ -356,7 -359,7 +359,7 @@@ static int validate_cmd(int cmd
        return ret;
  }
  
- static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        for (i = 0; i < dwords;) {
                int cmd, sz;
  
-               if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
-                       return -EINVAL;
+               cmd = buffer[i];
  
                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;
                OUT_RING(cmd);
  
                while (++i, --sz) {
-                       if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
-                                                        sizeof(cmd))) {
-                               return -EINVAL;
-                       }
-                       OUT_RING(cmd);
+                       OUT_RING(buffer[i]);
                }
        }
  
  
  int
  i915_emit_box(struct drm_device *dev,
-             struct drm_clip_rect __user *boxes,
+             struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect box;
+       struct drm_clip_rect box = boxes[i];
        RING_LOCALS;
  
-       if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
-               return -EFAULT;
-       }
        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
@@@ -460,7 -454,9 +454,9 @@@ static void i915_emit_breadcrumb(struc
  }
  
  static int i915_dispatch_cmdbuffer(struct drm_device * dev,
-                                  drm_i915_cmdbuffer_t * cmd)
+                                  drm_i915_cmdbuffer_t *cmd,
+                                  struct drm_clip_rect *cliprects,
+                                  void *cmdbuf)
  {
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;
  
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       ret = i915_emit_box(dev, cmd->cliprects, i,
+                       ret = i915_emit_box(dev, cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }
  
-               ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+               ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }
  }
  
  static int i915_dispatch_batchbuffer(struct drm_device * dev,
-                                    drm_i915_batchbuffer_t * batch)
+                                    drm_i915_batchbuffer_t * batch,
+                                    struct drm_clip_rect *cliprects)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;
  
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       int ret = i915_emit_box(dev, boxes, i,
+                       int ret = i915_emit_box(dev, cliprects, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
@@@ -626,6 -622,7 +622,7 @@@ static int i915_batchbuffer(struct drm_
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
+       struct drm_clip_rect *cliprects = NULL;
  
        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
  
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
  
-       if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
-                                                      batch->num_cliprects *
-                                                      sizeof(struct drm_clip_rect)))
-               return -EFAULT;
+       if (batch->num_cliprects < 0)
+               return -EINVAL;
+       if (batch->num_cliprects) {
+               cliprects = drm_calloc(batch->num_cliprects,
+                                      sizeof(struct drm_clip_rect),
+                                      DRM_MEM_DRIVER);
+               if (cliprects == NULL)
+                       return -ENOMEM;
+               ret = copy_from_user(cliprects, batch->cliprects,
+                                    batch->num_cliprects *
+                                    sizeof(struct drm_clip_rect));
+               if (ret != 0)
+                       goto fail_free;
+       }
  
        mutex_lock(&dev->struct_mutex);
-       ret = i915_dispatch_batchbuffer(dev, batch);
+       ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);
  
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ fail_free:
+       drm_free(cliprects,
+                batch->num_cliprects * sizeof(struct drm_clip_rect),
+                DRM_MEM_DRIVER);
        return ret;
  }
  
@@@ -659,6 -674,8 +674,8 @@@ static int i915_cmdbuffer(struct drm_de
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
+       struct drm_clip_rect *cliprects = NULL;
+       void *batch_data;
        int ret;
  
        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
  
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
  
-       if (cmdbuf->num_cliprects &&
-           DRM_VERIFYAREA_READ(cmdbuf->cliprects,
-                               cmdbuf->num_cliprects *
-                               sizeof(struct drm_clip_rect))) {
-               DRM_ERROR("Fault accessing cliprects\n");
-               return -EFAULT;
+       if (cmdbuf->num_cliprects < 0)
+               return -EINVAL;
+       batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
+       if (batch_data == NULL)
+               return -ENOMEM;
+       ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+       if (ret != 0)
+               goto fail_batch_free;
+       if (cmdbuf->num_cliprects) {
+               cliprects = drm_calloc(cmdbuf->num_cliprects,
+                                      sizeof(struct drm_clip_rect),
+                                      DRM_MEM_DRIVER);
+               if (cliprects == NULL)
+                       goto fail_batch_free;
+               ret = copy_from_user(cliprects, cmdbuf->cliprects,
+                                    cmdbuf->num_cliprects *
+                                    sizeof(struct drm_clip_rect));
+               if (ret != 0)
+                       goto fail_clip_free;
        }
  
        mutex_lock(&dev->struct_mutex);
-       ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+       ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-               return ret;
+               goto fail_batch_free;
        }
  
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-       return 0;
+ fail_batch_free:
+       drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
+ fail_clip_free:
+       drm_free(cliprects,
+                cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
+                DRM_MEM_DRIVER);
+       return ret;
  }
  
  static int i915_flip_bufs(struct drm_device *dev, void *data,
@@@ -1057,7 -1099,7 +1099,7 @@@ void i915_master_destroy(struct drm_dev
  int i915_driver_load(struct drm_device *dev, unsigned long flags)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long base, size;
 +      resource_size_t base, size;
        int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
  
        /* i915 has 4 more counters */
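
The i915_dma.c hunks above move every user-memory access out of the dispatch paths: cliprects and command buffers are copied into kernel allocations before struct_mutex is taken, so i915_emit_box() and i915_emit_cmds() never fault on user pointers while holding the lock. A condensed sketch of that copy-then-dispatch pattern; the helper and its argument struct are hypothetical, and only drm_calloc()/drm_free()/copy_from_user() are the calls actually used in the diff:

/* Hypothetical argument struct, for illustration only. */
struct example_args {
	int num_cliprects;
	struct drm_clip_rect __user *cliprects;
};

static int example_copy_cliprects(struct example_args *args,
				  struct drm_clip_rect **out)
{
	struct drm_clip_rect *cliprects;

	if (args->num_cliprects < 0)
		return -EINVAL;		/* reject bogus counts before allocating */
	if (args->num_cliprects == 0) {
		*out = NULL;		/* nothing to copy */
		return 0;
	}

	cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
			       DRM_MEM_DRIVER);
	if (cliprects == NULL)
		return -ENOMEM;

	if (copy_from_user(cliprects, args->cliprects,
			   args->num_cliprects * sizeof(*cliprects))) {
		drm_free(cliprects,
			 args->num_cliprects * sizeof(*cliprects),
			 DRM_MEM_DRIVER);
		return -EFAULT;
	}

	*out = cliprects;		/* caller dispatches under struct_mutex */
	return 0;
}
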
index 8d5ec5fd525205fdfc0f264ea606c2b4d78c30c4,b52cba0f16d2c6339fd875adeecc1f46078328b4..e5d2bdf2cc9b664b91d085c2db1d3358848df4ea
@@@ -43,8 -43,8 +43,8 @@@ static int i915_gem_object_set_cpu_read
                                                     uint64_t offset,
                                                     uint64_t size);
  static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
- static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
- static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+ static int i915_gem_object_get_pages(struct drm_gem_object *obj);
+ static void i915_gem_object_put_pages(struct drm_gem_object *obj);
  static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
  static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
@@@ -136,6 -136,224 +136,224 @@@ i915_gem_create_ioctl(struct drm_devic
        return 0;
  }
  
+ static inline int
+ fast_shmem_read(struct page **pages,
+               loff_t page_base, int page_offset,
+               char __user *data,
+               int length)
+ {
+       char __iomem *vaddr;
+       int ret;
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+               return -ENOMEM;
+       ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+       kunmap_atomic(vaddr, KM_USER0);
+       return ret;
+ }
+ static inline int
+ slow_shmem_copy(struct page *dst_page,
+               int dst_offset,
+               struct page *src_page,
+               int src_offset,
+               int length)
+ {
+       char *dst_vaddr, *src_vaddr;
+       dst_vaddr = kmap_atomic(dst_page, KM_USER0);
+       if (dst_vaddr == NULL)
+               return -ENOMEM;
+       src_vaddr = kmap_atomic(src_page, KM_USER1);
+       if (src_vaddr == NULL) {
+               kunmap_atomic(dst_vaddr, KM_USER0);
+               return -ENOMEM;
+       }
+       memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+       kunmap_atomic(src_vaddr, KM_USER1);
+       kunmap_atomic(dst_vaddr, KM_USER0);
+       return 0;
+ }
+ /**
+  * This is the fast shmem pread path, which attempts to copy_from_user directly
+  * from the backing pages of the object to the user's address space.  On a
+  * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
+  */
+ static int
+ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                         struct drm_i915_gem_pread *args,
+                         struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       ssize_t remain;
+       loff_t offset, page_base;
+       char __user *user_data;
+       int page_offset, page_length;
+       int ret;
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               page_base = (offset & ~(PAGE_SIZE-1));
+               page_offset = offset & (PAGE_SIZE-1);
+               page_length = remain;
+               if ((page_offset + remain) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - page_offset;
+               ret = fast_shmem_read(obj_priv->pages,
+                                     page_base, page_offset,
+                                     user_data, page_length);
+               if (ret)
+                       goto fail_put_pages;
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+ fail_put_pages:
+       i915_gem_object_put_pages(obj);
+ fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+ }
+ /**
+  * This is the fallback shmem pread path, which allocates temporary storage
+  * in kernel space to copy_to_user into outside of the struct_mutex, so we
+  * can copy out of the object's backing pages while holding the struct mutex
+  * and not take page faults.
+  */
+ static int
+ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                         struct drm_i915_gem_pread *args,
+                         struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct mm_struct *mm = current->mm;
+       struct page **user_pages;
+       ssize_t remain;
+       loff_t offset, pinned_pages, i;
+       loff_t first_data_page, last_data_page, num_pages;
+       int shmem_page_index, shmem_page_offset;
+       int data_page_index,  data_page_offset;
+       int page_length;
+       int ret;
+       uint64_t data_ptr = args->data_ptr;
+       remain = args->size;
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, yet we want to hold it while
+        * dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+       user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+       if (user_pages == NULL)
+               return -ENOMEM;
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto fail_put_user_pages;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * shmem_page_index = page number within shmem file
+                * shmem_page_offset = offset within page in shmem file
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               shmem_page_index = offset / PAGE_SIZE;
+               shmem_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+               page_length = remain;
+               if ((shmem_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - shmem_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+               ret = slow_shmem_copy(user_pages[data_page_index],
+                                     data_page_offset,
+                                     obj_priv->pages[shmem_page_index],
+                                     shmem_page_offset,
+                                     page_length);
+               if (ret)
+                       goto fail_put_pages;
+               remain -= page_length;
+               data_ptr += page_length;
+               offset += page_length;
+       }
+ fail_put_pages:
+       i915_gem_object_put_pages(obj);
+ fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+ fail_put_user_pages:
+       for (i = 0; i < pinned_pages; i++) {
+               SetPageDirty(user_pages[i]);
+               page_cache_release(user_pages[i]);
+       }
+       kfree(user_pages);
+       return ret;
+ }
  /**
   * Reads data from the object referenced by handle.
   *
@@@ -148,8 -366,6 +366,6 @@@ i915_gem_pread_ioctl(struct drm_device 
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       ssize_t read;
-       loff_t offset;
        int ret;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
                return -EINVAL;
        }
  
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-       offset = args->offset;
-       read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-                       args->size, &offset);
-       if (read != args->size) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               if (read < 0)
-                       return read;
-               else
-                       return -EINVAL;
-       }
+       ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+       if (ret != 0)
+               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
  
        drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
  
-       return 0;
+       return ret;
  }
  
  /* This is the fast write path which cannot handle
@@@ -223,29 -419,51 +419,51 @@@ fast_user_write(struct io_mapping *mapp
   */
  
  static inline int
- slow_user_write(struct io_mapping *mapping,
-               loff_t page_base, int page_offset,
-               char __user *user_data,
-               int length)
+ slow_kernel_write(struct io_mapping *mapping,
+                 loff_t gtt_base, int gtt_offset,
+                 struct page *user_page, int user_offset,
+                 int length)
  {
-       char __iomem *vaddr;
+       char *src_vaddr, *dst_vaddr;
        unsigned long unwritten;
  
-       vaddr = io_mapping_map_wc(mapping, page_base);
-       if (vaddr == NULL)
-               return -EFAULT;
-       unwritten = __copy_from_user(vaddr + page_offset,
-                                    user_data, length);
-       io_mapping_unmap(vaddr);
+       dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
+       src_vaddr = kmap_atomic(user_page, KM_USER1);
+       unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
+                                                     src_vaddr + user_offset,
+                                                     length);
+       kunmap_atomic(src_vaddr, KM_USER1);
+       io_mapping_unmap_atomic(dst_vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
  }
  
+ static inline int
+ fast_shmem_write(struct page **pages,
+                loff_t page_base, int page_offset,
+                char __user *data,
+                int length)
+ {
+       char __iomem *vaddr;
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+               return -ENOMEM;
+       __copy_from_user_inatomic(vaddr + page_offset, data, length);
+       kunmap_atomic(vaddr, KM_USER0);
+       return 0;
+ }
+ /**
+  * This is the fast pwrite path, where we copy the data directly from the
+  * user into the GTT, uncached.
+  */
  static int
- i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-                   struct drm_i915_gem_pwrite *args,
-                   struct drm_file *file_priv)
+ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                        struct drm_i915_gem_pwrite *args,
+                        struct drm_file *file_priv)
  {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
  
        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;
-       obj_priv->dirty = 1;
  
        while (remain > 0) {
                /* Operation in this page
                                       page_offset, user_data, page_length);
  
                /* If we get a fault while copying data, then (presumably) our
-                * source page isn't available. In this case, use the
-                * non-atomic function
+                * source page isn't available.  Return the error and we'll
+                * retry in the slow path.
                 */
-               if (ret) {
-                       ret = slow_user_write (dev_priv->mm.gtt_mapping,
-                                              page_base, page_offset,
-                                              user_data, page_length);
-                       if (ret)
-                               goto fail;
-               }
+               if (ret)
+                       goto fail;
  
                remain -= page_length;
                user_data += page_length;
@@@ -315,39 -527,284 +527,284 @@@ fail
        return ret;
  }
  
+ /**
+  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
+  * the memory and maps it using kmap_atomic for copying.
+  *
+  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
+  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
+  */
  static int
- i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-                     struct drm_i915_gem_pwrite *args,
-                     struct drm_file *file_priv)
+ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                        struct drm_i915_gem_pwrite *args,
+                        struct drm_file *file_priv)
  {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       ssize_t remain;
+       loff_t gtt_page_base, offset;
+       loff_t first_data_page, last_data_page, num_pages;
+       loff_t pinned_pages, i;
+       struct page **user_pages;
+       struct mm_struct *mm = current->mm;
+       int gtt_page_offset, data_page_offset, data_page_index, page_length;
        int ret;
-       loff_t offset;
-       ssize_t written;
+       uint64_t data_ptr = args->data_ptr;
+       remain = args->size;
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, and all of the pwrite implementations
+        * want to hold it while dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+       user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+       if (user_pages == NULL)
+               return -ENOMEM;
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto out_unpin_pages;
+       }
  
        mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_pin(obj, 0);
+       if (ret)
+               goto out_unlock;
+       ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+       if (ret)
+               goto out_unpin_object;
+       obj_priv = obj->driver_private;
+       offset = obj_priv->gtt_offset + args->offset;
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * gtt_page_base = page offset within aperture
+                * gtt_page_offset = offset within page in aperture
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               gtt_page_base = offset & PAGE_MASK;
+               gtt_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+               page_length = remain;
+               if ((gtt_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - gtt_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+               ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
+                                       gtt_page_base, gtt_page_offset,
+                                       user_pages[data_page_index],
+                                       data_page_offset,
+                                       page_length);
+               /* If we get a fault while copying data, then (presumably) our
+                * source page isn't available.  Return the error and we'll
+                * retry in the slow path.
+                */
+               if (ret)
+                       goto out_unpin_object;
+               remain -= page_length;
+               offset += page_length;
+               data_ptr += page_length;
+       }
+ out_unpin_object:
+       i915_gem_object_unpin(obj);
+ out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+ out_unpin_pages:
+       for (i = 0; i < pinned_pages; i++)
+               page_cache_release(user_pages[i]);
+       kfree(user_pages);
+       return ret;
+ }
+ /**
+  * This is the fast shmem pwrite path, which attempts to directly
+  * copy_from_user into the kmapped pages backing the object.
+  */
+ static int
+ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pwrite *args,
+                          struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       ssize_t remain;
+       loff_t offset, page_base;
+       char __user *user_data;
+       int page_offset, page_length;
+       int ret;
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
  
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
+       if (ret != 0)
+               goto fail_put_pages;
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+       obj_priv->dirty = 1;
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               page_base = (offset & ~(PAGE_SIZE-1));
+               page_offset = offset & (PAGE_SIZE-1);
+               page_length = remain;
+               if ((page_offset + remain) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - page_offset;
+               ret = fast_shmem_write(obj_priv->pages,
+                                      page_base, page_offset,
+                                      user_data, page_length);
+               if (ret)
+                       goto fail_put_pages;
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+ fail_put_pages:
+       i915_gem_object_put_pages(obj);
+ fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+ }
+ /**
+  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
+  * the memory and maps it using kmap_atomic for copying.
+  *
+  * This avoids taking mmap_sem for faulting on the user's address while the
+  * struct_mutex is held.
+  */
+ static int
+ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pwrite *args,
+                          struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct mm_struct *mm = current->mm;
+       struct page **user_pages;
+       ssize_t remain;
+       loff_t offset, pinned_pages, i;
+       loff_t first_data_page, last_data_page, num_pages;
+       int shmem_page_index, shmem_page_offset;
+       int data_page_index,  data_page_offset;
+       int page_length;
+       int ret;
+       uint64_t data_ptr = args->data_ptr;
+       remain = args->size;
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, and all of the pwrite implementations
+        * want to hold it while dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+       user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+       if (user_pages == NULL)
+               return -ENOMEM;
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto fail_put_user_pages;
        }
  
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+       if (ret != 0)
+               goto fail_put_pages;
+       obj_priv = obj->driver_private;
        offset = args->offset;
+       obj_priv->dirty = 1;
  
-       written = vfs_write(obj->filp,
-                           (char __user *)(uintptr_t) args->data_ptr,
-                           args->size, &offset);
-       if (written != args->size) {
-               mutex_unlock(&dev->struct_mutex);
-               if (written < 0)
-                       return written;
-               else
-                       return -EINVAL;
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * shmem_page_index = page number within shmem file
+                * shmem_page_offset = offset within page in shmem file
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               shmem_page_index = offset / PAGE_SIZE;
+               shmem_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+               page_length = remain;
+               if ((shmem_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - shmem_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+               ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                     shmem_page_offset,
+                                     user_pages[data_page_index],
+                                     data_page_offset,
+                                     page_length);
+               if (ret)
+                       goto fail_put_pages;
+               remain -= page_length;
+               data_ptr += page_length;
+               offset += page_length;
        }
  
+ fail_put_pages:
+       i915_gem_object_put_pages(obj);
+ fail_unlock:
        mutex_unlock(&dev->struct_mutex);
+ fail_put_user_pages:
+       for (i = 0; i < pinned_pages; i++)
+               page_cache_release(user_pages[i]);
+       kfree(user_pages);
  
-       return 0;
+       return ret;
  }
  
  /**
@@@ -388,10 -845,19 +845,19 @@@ i915_gem_pwrite_ioctl(struct drm_devic
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0)
-               ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
-       else
-               ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+                dev->gtt_total != 0) {
+               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+               if (ret == -EFAULT) {
+                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+                                                      file_priv);
+               }
+       } else {
+               ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+               if (ret == -EFAULT) {
+                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+                                                        file_priv);
+               }
+       }
  
  #if WATCH_PWRITE
        if (ret)
@@@ -627,7 -1093,7 +1093,7 @@@ i915_gem_create_mmap_offset(struct drm_
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_map_list *list;
 -      struct drm_map *map;
 +      struct drm_local_map *map;
        int ret = 0;
  
        /* Set the object up for mmap'ing */
@@@ -816,29 -1282,30 +1282,30 @@@ i915_gem_mmap_gtt_ioctl(struct drm_devi
  }
  
  static void
- i915_gem_object_free_page_list(struct drm_gem_object *obj)
+ i915_gem_object_put_pages(struct drm_gem_object *obj)
  {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;
  
-       if (obj_priv->page_list == NULL)
-               return;
+       BUG_ON(obj_priv->pages_refcount == 0);
  
+       if (--obj_priv->pages_refcount != 0)
+               return;
  
        for (i = 0; i < page_count; i++)
-               if (obj_priv->page_list[i] != NULL) {
+               if (obj_priv->pages[i] != NULL) {
                        if (obj_priv->dirty)
-                               set_page_dirty(obj_priv->page_list[i]);
-                       mark_page_accessed(obj_priv->page_list[i]);
-                       page_cache_release(obj_priv->page_list[i]);
+                               set_page_dirty(obj_priv->pages[i]);
+                       mark_page_accessed(obj_priv->pages[i]);
+                       page_cache_release(obj_priv->pages[i]);
                }
        obj_priv->dirty = 0;
  
-       drm_free(obj_priv->page_list,
+       drm_free(obj_priv->pages,
                 page_count * sizeof(struct page *),
                 DRM_MEM_DRIVER);
-       obj_priv->page_list = NULL;
+       obj_priv->pages = NULL;
  }
  
  static void
@@@ -1290,7 -1757,7 +1757,7 @@@ i915_gem_object_unbind(struct drm_gem_o
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);
  
-       i915_gem_object_free_page_list(obj);
+       i915_gem_object_put_pages(obj);
  
        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
@@@ -1409,7 -1876,7 +1876,7 @@@ i915_gem_evict_everything(struct drm_de
  }
  
  static int
- i915_gem_object_get_page_list(struct drm_gem_object *obj)
+ i915_gem_object_get_pages(struct drm_gem_object *obj)
  {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
        struct page *page;
        int ret;
  
-       if (obj_priv->page_list)
+       if (obj_priv->pages_refcount++ != 0)
                return 0;
  
        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
-       BUG_ON(obj_priv->page_list != NULL);
-       obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
-                                        DRM_MEM_DRIVER);
-       if (obj_priv->page_list == NULL) {
+       BUG_ON(obj_priv->pages != NULL);
+       obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
+                                    DRM_MEM_DRIVER);
+       if (obj_priv->pages == NULL) {
                DRM_ERROR("Faled to allocate page list\n");
+               obj_priv->pages_refcount--;
                return -ENOMEM;
        }
  
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        DRM_ERROR("read_mapping_page failed: %d\n", ret);
-                       i915_gem_object_free_page_list(obj);
+                       i915_gem_object_put_pages(obj);
                        return ret;
                }
-               obj_priv->page_list[i] = page;
+               obj_priv->pages[i] = page;
        }
        return 0;
  }
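
The i915_gem.c hunks rename the page-list helpers to i915_gem_object_get_pages()/i915_gem_object_put_pages() and make them reference counted, so the new pread/pwrite paths can hold the backing pages independently of the GTT binding. A reduced sketch of that refcounting shape, with hypothetical structure and function names:

/* Hypothetical object; only the fields needed for the pattern are shown. */
struct example_obj {
	int pages_refcount;
	size_t page_count;
	struct page **pages;
};

static int example_get_pages(struct example_obj *obj)
{
	if (obj->pages_refcount++ != 0)
		return 0;			/* pages already populated */

	obj->pages = drm_calloc(obj->page_count, sizeof(struct page *),
				DRM_MEM_DRIVER);
	if (obj->pages == NULL) {
		obj->pages_refcount--;		/* undo the count on failure */
		return -ENOMEM;
	}
	/* ... look up and pin each backing page here ... */
	return 0;
}

static void example_put_pages(struct example_obj *obj)
{
	BUG_ON(obj->pages_refcount == 0);
	if (--obj->pages_refcount != 0)
		return;				/* still held by another user */

	/* ... dirty, mark accessed, and release each page here ... */
	drm_free(obj->pages, obj->page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj->pages = NULL;
}
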
@@@ -1766,7 -2234,7 +2234,7 @@@ i915_gem_object_bind_to_gtt(struct drm_
        DRM_INFO("Binding object of size %d at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
  #endif
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
-                                              obj_priv->page_list,
+                                              obj_priv->pages,
                                               page_count,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
-               i915_gem_object_free_page_list(obj);
+               i915_gem_object_put_pages(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return -ENOMEM;
@@@ -1810,10 -2278,10 +2278,10 @@@ i915_gem_clflush_object(struct drm_gem_
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
-       if (obj_priv->page_list == NULL)
+       if (obj_priv->pages == NULL)
                return;
  
-       drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+       drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
  }
  
  /** Flushes any GPU write domain for the object if it's dirty. */
@@@ -2155,7 -2623,7 +2623,7 @@@ i915_gem_object_set_to_full_cpu_read_do
                for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
                        if (obj_priv->page_cpu_valid[i])
                                continue;
-                       drm_clflush_pages(obj_priv->page_list + i, 1);
+                       drm_clflush_pages(obj_priv->pages + i, 1);
                }
        }
  
@@@ -2220,7 -2688,7 +2688,7 @@@ i915_gem_object_set_cpu_read_domain_ran
                if (obj_priv->page_cpu_valid[i])
                        continue;
  
-               drm_clflush_pages(obj_priv->page_list + i, 1);
+               drm_clflush_pages(obj_priv->pages + i, 1);
  
                obj_priv->page_cpu_valid[i] = 1;
        }
  static int
  i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                 struct drm_file *file_priv,
-                                struct drm_i915_gem_exec_object *entry)
+                                struct drm_i915_gem_exec_object *entry,
+                                struct drm_i915_gem_relocation_entry *relocs)
  {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_relocation_entry reloc;
-       struct drm_i915_gem_relocation_entry __user *relocs;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int i, ret;
        void __iomem *reloc_page;
  
        entry->offset = obj_priv->gtt_offset;
  
-       relocs = (struct drm_i915_gem_relocation_entry __user *)
-                (uintptr_t) entry->relocs_ptr;
        /* Apply the relocations, using the GTT aperture to avoid cache
         * flushing requirements.
         */
        for (i = 0; i < entry->relocation_count; i++) {
+               struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
                struct drm_gem_object *target_obj;
                struct drm_i915_gem_object *target_obj_priv;
                uint32_t reloc_val, reloc_offset;
                uint32_t __iomem *reloc_entry;
  
-               ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
-               if (ret != 0) {
-                       i915_gem_object_unpin(obj);
-                       return ret;
-               }
                target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-                                                  reloc.target_handle);
+                                                  reloc->target_handle);
                if (target_obj == NULL) {
                        i915_gem_object_unpin(obj);
                        return -EBADF;
                 */
                if (target_obj_priv->gtt_space == NULL) {
                        DRM_ERROR("No GTT space found for object %d\n",
-                                 reloc.target_handle);
+                                 reloc->target_handle);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
  
-               if (reloc.offset > obj->size - 4) {
+               if (reloc->offset > obj->size - 4) {
                        DRM_ERROR("Relocation beyond object bounds: "
                                  "obj %p target %d offset %d size %d.\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset, (int) obj->size);
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset, (int) obj->size);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
-               if (reloc.offset & 3) {
+               if (reloc->offset & 3) {
                        DRM_ERROR("Relocation not 4-byte aligned: "
                                  "obj %p target %d offset %d.\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset);
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
  
-               if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-                   reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+               if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+                   reloc->read_domains & I915_GEM_DOMAIN_CPU) {
                        DRM_ERROR("reloc with read/write CPU domains: "
                                  "obj %p target %d offset %d "
                                  "read %08x write %08x",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset,
-                                 reloc.read_domains,
-                                 reloc.write_domain);
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset,
+                                 reloc->read_domains,
+                                 reloc->write_domain);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
  
-               if (reloc.write_domain && target_obj->pending_write_domain &&
-                   reloc.write_domain != target_obj->pending_write_domain) {
+               if (reloc->write_domain && target_obj->pending_write_domain &&
+                   reloc->write_domain != target_obj->pending_write_domain) {
                        DRM_ERROR("Write domain conflict: "
                                  "obj %p target %d offset %d "
                                  "new %08x old %08x\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset,
-                                 reloc.write_domain,
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset,
+                                 reloc->write_domain,
                                  target_obj->pending_write_domain);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                         "presumed %08x delta %08x\n",
                         __func__,
                         obj,
-                        (int) reloc.offset,
-                        (int) reloc.target_handle,
-                        (int) reloc.read_domains,
-                        (int) reloc.write_domain,
+                        (int) reloc->offset,
+                        (int) reloc->target_handle,
+                        (int) reloc->read_domains,
+                        (int) reloc->write_domain,
                         (int) target_obj_priv->gtt_offset,
-                        (int) reloc.presumed_offset,
-                        reloc.delta);
+                        (int) reloc->presumed_offset,
+                        reloc->delta);
  #endif
  
-               target_obj->pending_read_domains |= reloc.read_domains;
-               target_obj->pending_write_domain |= reloc.write_domain;
+               target_obj->pending_read_domains |= reloc->read_domains;
+               target_obj->pending_write_domain |= reloc->write_domain;
  
                /* If the relocation already has the right value in it, no
                 * more work needs to be done.
                 */
-               if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+               if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
                        drm_gem_object_unreference(target_obj);
                        continue;
                }
                /* Map the page containing the relocation we're going to
                 * perform.
                 */
-               reloc_offset = obj_priv->gtt_offset + reloc.offset;
+               reloc_offset = obj_priv->gtt_offset + reloc->offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                      (reloc_offset &
                                                       ~(PAGE_SIZE - 1)));
                reloc_entry = (uint32_t __iomem *)(reloc_page +
                                                   (reloc_offset & (PAGE_SIZE - 1)));
-               reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+               reloc_val = target_obj_priv->gtt_offset + reloc->delta;
  
  #if WATCH_BUF
                DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-                         obj, (unsigned int) reloc.offset,
+                         obj, (unsigned int) reloc->offset,
                          readl(reloc_entry), reloc_val);
  #endif
                writel(reloc_val, reloc_entry);
                io_mapping_unmap_atomic(reloc_page);
  
-               /* Write the updated presumed offset for this entry back out
-                * to the user.
+               /* The updated presumed offset for this entry will be
+                * copied back out to the user.
                 */
-               reloc.presumed_offset = target_obj_priv->gtt_offset;
-               ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
-               if (ret != 0) {
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return ret;
-               }
+               reloc->presumed_offset = target_obj_priv->gtt_offset;
  
                drm_gem_object_unreference(target_obj);
        }
  static int
  i915_dispatch_gem_execbuffer(struct drm_device *dev,
                              struct drm_i915_gem_execbuffer *exec,
+                             struct drm_clip_rect *cliprects,
                              uint64_t exec_offset)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
-                                            (uintptr_t) exec->cliprects_ptr;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t        exec_start, exec_len;
  
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       int ret = i915_emit_box(dev, boxes, i,
+                       int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
@@@ -2500,6 -2953,75 +2953,75 @@@ i915_gem_ring_throttle(struct drm_devic
        return ret;
  }
  
+ static int
+ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+                             uint32_t buffer_count,
+                             struct drm_i915_gem_relocation_entry **relocs)
+ {
+       uint32_t reloc_count = 0, reloc_index = 0, i;
+       int ret = 0;
+
+       *relocs = NULL;
+       for (i = 0; i < buffer_count; i++) {
+               if (reloc_count + exec_list[i].relocation_count < reloc_count)
+                       return -EINVAL;
+               reloc_count += exec_list[i].relocation_count;
+       }
+       *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+       if (*relocs == NULL)
+               return -ENOMEM;
+       for (i = 0; i < buffer_count; i++) {
+               struct drm_i915_gem_relocation_entry __user *user_relocs;
+               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+               ret = copy_from_user(&(*relocs)[reloc_index],
+                                    user_relocs,
+                                    exec_list[i].relocation_count *
+                                    sizeof(**relocs));
+               if (ret != 0) {
+                       drm_free(*relocs, reloc_count * sizeof(**relocs),
+                                DRM_MEM_DRIVER);
+                       *relocs = NULL;
+                       return ret;
+               }
+               reloc_index += exec_list[i].relocation_count;
+       }
+       return ret;
+ }
+
+ static int
+ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+                           uint32_t buffer_count,
+                           struct drm_i915_gem_relocation_entry *relocs)
+ {
+       uint32_t reloc_count = 0, i;
+       int ret = 0;
+
+       for (i = 0; i < buffer_count; i++) {
+               struct drm_i915_gem_relocation_entry __user *user_relocs;
+               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+               if (ret == 0) {
+                       ret = copy_to_user(user_relocs,
+                                          &relocs[reloc_count],
+                                          exec_list[i].relocation_count *
+                                          sizeof(*relocs));
+               }
+               reloc_count += exec_list[i].relocation_count;
+       }
+       drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+       return ret;
+ }
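The two helpers above pull every buffer's relocation entries into one flat kernel array on the way in and copy the (possibly updated) entries back to userspace on the way out. Each buffer's slice of that array is found with a running index advanced by its relocation_count, as the execbuffer loop below also does. A freestanding sketch of that indexing walk, using hypothetical per-buffer counts:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-buffer relocation counts, standing in for
 * exec_list[i].relocation_count (illustration only). */
static const uint32_t relocation_count[] = { 3, 0, 5, 2 };

int main(void)
{
	uint32_t reloc_index = 0;
	size_t i;

	for (i = 0; i < sizeof(relocation_count) / sizeof(relocation_count[0]); i++) {
		/* Buffer i owns the half-open slice
		 * relocs[reloc_index .. reloc_index + relocation_count[i]). */
		printf("buffer %zu -> relocs[%u..%u)\n",
		       i, reloc_index, reloc_index + relocation_count[i]);
		reloc_index += relocation_count[i];
	}
	return 0;
}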
  int
  i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
        struct drm_gem_object **object_list = NULL;
        struct drm_gem_object *batch_obj;
        struct drm_i915_gem_object *obj_priv;
-       int ret, i, pinned = 0;
+       struct drm_clip_rect *cliprects = NULL;
+       struct drm_i915_gem_relocation_entry *relocs;
+       int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
-       uint32_t seqno, flush_domains;
+       uint32_t seqno, flush_domains, reloc_index;
        int pin_tries;
  
  #if WATCH_EXEC
                goto pre_mutex_err;
        }
  
+       if (args->num_cliprects != 0) {
+               cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
+                                      DRM_MEM_DRIVER);
+               if (cliprects == NULL) {
+                       ret = -ENOMEM;
+                       goto pre_mutex_err;
+               }
+
+               ret = copy_from_user(cliprects,
+                                    (struct drm_clip_rect __user *)
+                                    (uintptr_t) args->cliprects_ptr,
+                                    sizeof(*cliprects) * args->num_cliprects);
+               if (ret != 0) {
+                       DRM_ERROR("copy %d cliprects failed: %d\n",
+                                 args->num_cliprects, ret);
+                       goto pre_mutex_err;
+               }
+       }
+       ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+                                           &relocs);
+       if (ret != 0)
+               goto pre_mutex_err;
        mutex_lock(&dev->struct_mutex);
  
        i915_verify_inactive(dev, __FILE__, __LINE__);
        /* Pin and relocate */
        for (pin_tries = 0; ; pin_tries++) {
                ret = 0;
+               reloc_index = 0;
                for (i = 0; i < args->buffer_count; i++) {
                        object_list[i]->pending_read_domains = 0;
                        object_list[i]->pending_write_domain = 0;
                        ret = i915_gem_object_pin_and_relocate(object_list[i],
                                                               file_priv,
-                                                              &exec_list[i]);
+                                                              &exec_list[i],
+                                                              &relocs[reloc_index]);
                        if (ret)
                                break;
                        pinned = i + 1;
+                       reloc_index += exec_list[i].relocation_count;
                }
                /* success */
                if (ret == 0)
  #endif
  
        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
                                  args->buffer_count, ret);
        }
  
+       /* Copy the updated relocations out regardless of current error
+        * state.  Failure to update the relocs would mean that the next
+        * time userland calls execbuf, it would do so with presumed offset
+        * state that didn't match the actual object state.
+        */
+       ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+                                          relocs);
+       if (ret2 != 0) {
+               DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+               if (ret == 0)
+                       ret = ret2;
+       }
  pre_mutex_err:
        drm_free(object_list, sizeof(*object_list) * args->buffer_count,
                 DRM_MEM_DRIVER);
        drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
                 DRM_MEM_DRIVER);
+       drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
+                DRM_MEM_DRIVER);
  
        return ret;
  }
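The comment in the function above explains why the relocations are written back even on error: userspace caches each target's presumed offset, and both sides skip the patching work when that cached offset still matches the object's real GTT offset. A small userspace-flavoured sketch of that check, with hypothetical field names modelled on drm_i915_gem_relocation_entry rather than taken from libdrm:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cached relocation entry (illustration only). */
struct reloc {
	uint64_t presumed_offset;  /* where the kernel last placed the target */
	uint32_t delta;            /* offset added to the target's address */
};

/* If the target buffer is still where the kernel said it was, the value
 * already written into the batch remains valid and no patching is needed. */
static int reloc_is_stale(const struct reloc *r, uint64_t current_gtt_offset)
{
	return r->presumed_offset != current_gtt_offset;
}

int main(void)
{
	struct reloc r = { .presumed_offset = 0x20000, .delta = 0x40 };

	printf("target unmoved: %s\n",
	       reloc_is_stale(&r, 0x20000) ? "patch" : "skip");
	printf("target moved:   %s\n",
	       reloc_is_stale(&r, 0x30000) ? "patch" : "skip");
	return 0;
}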
@@@ -3188,7 -3754,7 +3754,7 @@@ i915_gem_init_hws(struct drm_device *de
  
        dev_priv->status_gfx_addr = obj_priv->gtt_offset;
  
-       dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+       dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@@ -3218,7 -3784,7 +3784,7 @@@ i915_gem_cleanup_hws(struct drm_device 
        obj = dev_priv->hws_obj;
        obj_priv = obj->driver_private;
  
-       kunmap(obj_priv->page_list[0]);
+       kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        dev_priv->hws_obj = NULL;
@@@ -3521,20 -4087,20 +4087,20 @@@ void i915_gem_detach_phys_object(struc
        if (!obj_priv->phys_obj)
                return;
  
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto out;
  
        page_count = obj->size / PAGE_SIZE;
  
        for (i = 0; i < page_count; i++) {
-               char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(dst, KM_USER0);
        }
-       drm_clflush_pages(obj_priv->page_list, page_count);
+       drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
  out:
        obj_priv->phys_obj->cur_obj = NULL;
@@@ -3577,7 -4143,7 +4143,7 @@@ i915_gem_attach_phys_object(struct drm_
        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj_priv->phys_obj->cur_obj = obj;
  
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                DRM_ERROR("failed to get page list\n");
                goto out;
        page_count = obj->size / PAGE_SIZE;
  
        for (i = 0; i < page_count; i++) {
-               char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  
                memcpy(dst, src, PAGE_SIZE);
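The attach path above copies the object's backing pages into the contiguous physical buffer one page at a time, mapping each page only for the duration of its copy. A self-contained userspace analogue of that page-by-page copy, assuming a hypothetical three-page object and a 4 KiB page size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

int main(void)
{
	/* Hypothetical backing pages and contiguous destination buffer,
	 * standing in for obj_priv->pages[] and phys_obj->handle->vaddr. */
	enum { PAGE_COUNT = 3 };
	uint8_t *pages[PAGE_COUNT];
	uint8_t *vaddr = malloc((size_t)PAGE_COUNT * PAGE_SIZE);
	int i;

	if (vaddr == NULL)
		return 1;

	for (i = 0; i < PAGE_COUNT; i++) {
		pages[i] = malloc(PAGE_SIZE);
		if (pages[i] == NULL)
			return 1;
		memset(pages[i], i, PAGE_SIZE);
	}

	/* One copy per page into the contiguous buffer, at page strides. */
	for (i = 0; i < PAGE_COUNT; i++)
		memcpy(vaddr + (size_t)i * PAGE_SIZE, pages[i], PAGE_SIZE);

	printf("copied %d pages (%u bytes)\n",
	       PAGE_COUNT, PAGE_COUNT * PAGE_SIZE);

	for (i = 0; i < PAGE_COUNT; i++)
		free(pages[i]);
	free(vaddr);
	return 0;
}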
diff --combined include/drm/drm_pciids.h
index c2fd3c58283ae671c940385d52c44eb0885c8e91,76c4c8243038388b3843e6470c256afbe1e125c8..f3f6718b6eb027fc71eba888e334a491a6eafbad
        {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
        {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
 +      {0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
        {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
        {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
        {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
 +      {0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
 +      {0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 +      {0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 +      {0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 +      {0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 +      {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
  
  #define r128_PCI_IDS \
        {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
        {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
        {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+       {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+       {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
        {0, 0, 0}
diff --combined include/drm/radeon_drm.h
index 937a275cbb9a9f29651f9324d6a23baf6640edb6,72ecf67ad3ec1591c24d74920c488f118b70ca2a..fe3e3a4b4aed9257ec14a6c1e3799fd52444b90e
@@@ -33,6 -33,8 +33,8 @@@
  #ifndef __RADEON_DRM_H__
  #define __RADEON_DRM_H__
  
+ #include <linux/types.h>
  /* WARNING: If you change any of these defines, make sure to change the
   * defines in the X server file (radeon_sarea.h)
   */
@@@ -304,8 -306,6 +306,8 @@@ typedef union 
  
  #define RADEON_SCRATCH_REG_OFFSET     32
  
 +#define R600_SCRATCH_REG_OFFSET         256
 +
  #define RADEON_NR_SAREA_CLIPRECTS     12
  
  /* There are 2 heaps (local/GART).  Each region within a heap is a
@@@ -528,8 -528,7 +530,8 @@@ typedef struct drm_radeon_init 
                RADEON_INIT_CP = 0x01,
                RADEON_CLEANUP_CP = 0x02,
                RADEON_INIT_R200_CP = 0x03,
 -              RADEON_INIT_R300_CP = 0x04
 +              RADEON_INIT_R300_CP = 0x04,
 +              RADEON_INIT_R600_CP = 0x05
        } func;
        unsigned long sarea_priv_offset;
        int is_pci;
@@@ -725,7 -724,7 +727,7 @@@ typedef struct drm_radeon_irq_wait 
  
  typedef struct drm_radeon_setparam {
        unsigned int param;
-       int64_t value;
+       __s64 value;
  } drm_radeon_setparam_t;
  
  #define RADEON_SETPARAM_FB_LOCATION    1      /* determined framebuffer location */