/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;
	return obj->pin_display;
}
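/* Temporarily reserve a node in the mappable (CPU-visible) half of the
 * global GTT. The pread/pwrite slow paths below use this as a scratch
 * window when the object itself cannot be pinned into the aperture.
 */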
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
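/* Block until any pending GPU reset has completed (or give up after ten
 * seconds) so that callers do not race their ioctl against a reset in
 * progress.
 */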
i915_gem_wait_for_error(struct i915_gpu_error *error)
	if (!i915_reset_in_progress(error))
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
	} else if (ret < 0) {

int i915_mutex_lock_interruptible(struct drm_device *dev)
	struct drm_i915_private *dev_priv = to_i915(dev);

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
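/* Report the total size of the global GTT and how much of it is currently
 * available, i.e. not claimed by pinned (unevictable) VMA.
 */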
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;
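/* Copy the shmem backing store into the contiguous DMA buffer of a "phys"
 * object and build a single-entry sg_table pointing at that buffer.
 */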
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct scatterlist *sg;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		page = shmem_read_mapping_page(mapping, i);
			return PTR_ERR(page);
		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (sg_alloc_table(st, 1, GFP_KERNEL)) {

	sg->length = obj->base.size;
	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
		/* In the event of a disaster, abandon all caches and
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;

	if (obj->madv == I915_MADV_DONTNEED)

	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		page = shmem_read_mapping_page(mapping, i);
		dst = kmap_atomic(page);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		memcpy(dst, vaddr, PAGE_SIZE);
		set_page_dirty(page);
		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

	sg_free_table(obj->pages);
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};
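/* Unbind every VMA belonging to the object from its address spaces. Since
 * i915_vma_unbind() may wait for rendering, the vma_list is walked via a
 * temporary "still_in_list" so that progress survives the list changing.
 */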
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);

	/* The vma will only be freed if it is marked as closed, and if we wait
	 * upon rendering to the vma, we may unbind anything in the list.
	while ((vma = list_first_entry_or_null(&obj->vma_list,
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
	list_splice(&still_in_list, &obj->vma_list);
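/* Switch an object over to a contiguous "phys" allocation, as used by the
 * legacy cursor and overlay code. The object must be shmem backed and not
 * marked purgeable; any existing GTT bindings and pages are released first.
 */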
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
	drm_dma_handle_t *phys;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))

	if (obj->madv != I915_MADV_WILLNEED)

	if (obj->base.filp == NULL)

	ret = i915_gem_object_unbind(obj);

	ret = i915_gem_object_put_pages(obj);

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
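/* pwrite into a phys object: the backing storage is a permanently mapped
 * contiguous buffer, so the data can be copied in directly and then flushed
 * out of the CPU caches.
 */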
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	ret = i915_gem_object_wait_rendering(obj, false);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
	struct drm_i915_gem_object *obj;

	size = roundup(size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);

i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
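/* Swizzled copy helpers for objects using bit-17 swizzling: when bit 17 of a
 * page's physical address participates in the memory channel interleave,
 * userspace cannot predict the layout, so the kernel copies the data while
 * XORing bit 6 of the offset (swapping the 64-byte halves of each 128-byte
 * region) to undo the swizzle.
 */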
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
	int ret, cpu_offset = 0;

		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;

__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
	int ret, cpu_offset = 0;

		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))

	ret = i915_gem_object_wait_rendering(obj, true);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,

	ret = i915_gem_object_get_pages(obj);

	i915_gem_object_pin_pages(obj);
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
	if (unlikely(page_do_bit17_swizzling))

	vaddr = kmap_atomic(page);
		drm_clflush_virt_range(vaddr + shmem_page_offset,
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;

shmem_clflush_swizzled_range(char *addr, unsigned long length,
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
		drm_clflush_virt_range(addr, length);
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,

	return ret ? -EFAULT : 0;
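/* Copy between user memory and a WC mapping of the aperture while allowing
 * page faults; only safe to call with struct_mutex dropped, hence "slow".
 */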
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
	void __iomem *ioaddr;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
		unwritten = __copy_from_user(vaddr, user_data, length);
		unwritten = __copy_to_user(user_data, vaddr, length);
	io_mapping_unmap(ioaddr);
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mm_node node;
	char __user *user_data;

	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		ret = i915_gem_object_get_pages(obj);
			remove_mappable_node(&node);
		i915_gem_object_pin_pages(obj);
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	user_data = u64_to_user_ptr(data_ptr);
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
			mutex_lock(&dev->struct_mutex);

		/* Operation in this page
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
			page_base += offset & PAGE_MASK;

		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		if (slow_user_access(ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {

		remain -= page_length;
		user_data += page_length;
		offset += page_length;

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		ret = i915_gem_object_set_to_gtt_domain(obj, false);

	if (node.allocated) {
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
		i915_gem_object_ggtt_unpin(obj);
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	if (!i915_gem_object_has_struct_page(obj))

	user_data = u64_to_user_ptr(args->data_ptr);

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		/* Operation in this page
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,

		mutex_lock(&dev->struct_mutex);

		remain -= page_length;
		user_data += page_length;
		offset += page_length;

	i915_gem_object_unpin_pages(obj);
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),

	ret = i915_mutex_lock_interruptible(dev);

	obj = i915_gem_object_lookup(file, args->handle);

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));

	i915_gem_object_put(obj);

	mutex_unlock(&dev->struct_mutex);
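/* Fast GTT write helper: copies user data straight into a WC mapping of the
 * aperture with an atomic (non-faulting) copy; callers fall back to the
 * faulting slow path if this returns non-zero.
 */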
/* This is the fast write path which cannot handle
 * page faults in the source data
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
	void __iomem *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
	io_mapping_unmap_atomic(vaddr_atomic);
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	bool hit_slow_path = false;

	if (obj->tiling_mode != I915_TILING_NONE)

	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		ret = i915_gem_object_get_pages(obj);
			remove_mappable_node(&node);
		i915_gem_object_pin_pages(obj);
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);

	ret = i915_gem_object_set_to_gtt_domain(obj, true);

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;

		/* Operation in this page
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
			page_base += offset & PAGE_MASK;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(ggtt->mappable,
					     page_offset, user_data,
					     page_length, true)) {
				mutex_lock(&dev->struct_mutex);

			mutex_lock(&dev->struct_mutex);

		remain -= page_length;
		user_data += page_length;
		offset += page_length;

	if (hit_slow_path) {
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			ret = i915_gem_object_set_to_gtt_domain(obj, false);

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);

	if (node.allocated) {
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
		i915_gem_object_ggtt_unpin(obj);
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
	if (unlikely(page_do_bit17_swizzling))

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
		ret = __copy_from_user(vaddr + shmem_page_offset,
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_do_bit17_swizzling);

	return ret ? -EFAULT : 0;
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_object_wait_rendering(obj, false);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		/* Operation in this page
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		remain -= page_length;
		user_data += page_length;
		offset += page_length;

	i915_gem_object_unpin_pages(obj);

	 * Fixup: Flush cpu caches in case we didn't flush the dirty
	 * cachelines in-line while writing and the object moved
	 * out of the cpu write domain while we've dropped the lock.
	if (!needs_clflush_after &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		if (i915_gem_clflush_object(obj, obj->pin_display))
			needs_clflush_after = true;

	if (needs_clflush_after)
		i915_gem_chipset_flush(to_i915(dev));
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 * Writes data to the object referenced by handle.
 * @data: ioctl data blob
 *
 * On error, the contents of the buffer that were to be modified are undefined.
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;

	if (args->size == 0)

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);

	obj = i915_gem_object_lookup(file, args->handle);

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);

	i915_gem_object_put(obj);

	mutex_unlock(&dev->struct_mutex);

	intel_runtime_pm_put(dev_priv);
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for read access or write
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	active = obj->last_read;
	active_mask = obj->active;

	active = &obj->last_write;

	for_each_active(active_mask, idx) {
		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);

	resv = i915_gem_object_get_dmabuf_resv(obj);
		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct intel_rps_client *rps,
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	struct i915_gem_active *active;
	unsigned long active_mask;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	active_mask = obj->active;

	active = obj->last_read;

	active = &obj->last_write;

	for_each_active(active_mask, i) {
		struct drm_i915_gem_request *req;

		req = i915_gem_active_get(&active[i],
					  &obj->base.dev->struct_mutex);
			requests[n++] = req;

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; ret == 0 && i < n; i++)
		ret = i915_wait_request(requests[i], true, NULL, rps);
	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		i915_gem_request_put(requests[i]);
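/* Helpers for the ioctls below: dig the per-file RPS client out of the file
 * private, and pick the frontbuffer-tracking origin for a CPU vs GTT write.
 */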
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;
	return &fpriv->rps;
}

static enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
	       ORIGIN_GTT : ORIGIN_CPU;
}
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @data: ioctl data blob
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)

	if (read_domains & I915_GEM_GPU_DOMAINS)

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	if (write_domain != 0 && read_domains != write_domain)

	ret = i915_mutex_lock_interruptible(dev);

	obj = i915_gem_object_lookup(file, args->handle);

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  to_rps_client(file),

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

	i915_gem_object_put(obj);

	mutex_unlock(&dev->struct_mutex);
 * Called when user space has done writes to this buffer
 * @data: ioctl data blob
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	ret = i915_mutex_lock_interruptible(dev);

	obj = i915_gem_object_lookup(file, args->handle);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	i915_gem_object_put(obj);

	mutex_unlock(&dev->struct_mutex);
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 * @data: ioctl data blob
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;

	if (args->flags & ~(I915_MMAP_WC))

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))

	obj = i915_gem_object_lookup(file, args->handle);

	/* prime objects have no backing filp to GEM mmap
	if (!obj->base.filp) {
		i915_gem_object_put_unlocked(obj);

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put_unlocked(obj);
		vma = find_vma(mm, addr);
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->has_wc_mmap, true);
	i915_gem_object_put_unlocked(obj);
	if (IS_ERR((void *)addr))

	args->addr_ptr = (uint64_t) addr;
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	pgoff_t page_offset;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>

	ret = i915_mutex_lock_interruptible(dev);

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= ggtt->mappable_end &&
	    obj->tiling_mode == I915_TILING_NONE) {
		static const unsigned int chunk_size = 256; // 1 MiB

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
			      view.params.partial.offset);

	/* Now pin it into the GTT if needed */
	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);

	ret = i915_gem_object_set_to_gtt_domain(obj, write);

	ret = i915_gem_object_get_fence(obj);

	/* Finally, remap it using the new GTT offset */
	pfn = ggtt->mappable_base +
	      i915_gem_obj_ggtt_offset_view(obj, &view);

	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
		/* Overriding existing pages in partial view does not cause
		 * us any trouble as TLBs are still valid because the fault
		 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partial's range).
		unsigned long base = vma->vm_start +
				     (view.params.partial.offset << PAGE_SHIFT);

		for (i = 0; i < view.params.partial.size; i++) {
			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);

		obj->fault_mappable = true;
		if (!obj->fault_mappable) {
			unsigned long size = min_t(unsigned long,
						   vma->vm_end - vma->vm_start,

			for (i = 0; i < size >> PAGE_SHIFT; i++) {
				ret = vm_insert_pfn(vma,
						    (unsigned long)vma->vm_start + i * PAGE_SIZE,

			obj->fault_mappable = true;
			ret = vm_insert_pfn(vma,
					    (unsigned long)vmf->virtual_address,

	i915_gem_object_ggtt_unpin_view(obj, &view);

	mutex_unlock(&dev->struct_mutex);

	 * We eat errors when the gpu is terminally wedged to avoid
	 * userspace unduly crashing (gl has no provisions for mmaps to
	 * fail). But any other -EIO isn't ours (e.g. swap in failure)
	 * and so needs to be reported.
	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = VM_FAULT_SIGBUS;

	 * EAGAIN means the gpu is hung and we'll wait for the error
	 * handler to reset everything when re-faulting in
	 * i915_mutex_lock_interruptible.

	 * EBUSY is ok: this just means that another thread
	 * already did the job.
	ret = VM_FAULT_NOPAGE;

	ret = VM_FAULT_SIGBUS;

	WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
	ret = VM_FAULT_SIGBUS;

	intel_runtime_pm_put(dev_priv);
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->fault_mappable)

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.

	obj->fault_mappable = false;
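/* Revoke the userspace CPU mappings of every bound object, used when all
 * PTEs into the aperture become stale at once (e.g. across suspend).
 */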
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
		ggtt_size = 512*1024;

	while (ggtt_size < size)

 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
	GEM_BUG_ON(size == 0);

	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)

	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
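/* Set up (or reuse) the fake mmap offset for this object, leaning on the
 * shrinker when the mmap address space is badly fragmented; offsets must
 * stay persistent for the lifetime of the object once handed to userspace.
 */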
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	i915_gem_shrink(dev_priv,
			obj->base.size >> PAGE_SHIFT,
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
	ret = drm_gem_create_mmap_offset(&obj->base);

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);

	dev_priv->mm.shrinker_no_lock_stealing = false;

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
	drm_gem_free_mmap_offset(&obj->base);
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
	struct drm_i915_gem_object *obj;

	ret = i915_mutex_lock_interruptible(dev);

	obj = i915_gem_object_lookup(file, handle);

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");

	ret = i915_gem_object_create_mmap_offset(obj);

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put(obj);

	mutex_unlock(&dev->struct_mutex);

 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
/* Immediately discard the backing storage */
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;

/* Try to discard unwanted pages */
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:

	if (obj->base.filp == NULL)

	mapping = file_inode(obj->base.filp)->i_mapping,
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
	struct sgt_iter sgt_iter;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

	sg_free_table(obj->pages);
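/* Drop the object's backing pages once nothing holds a pin on them; callers
 * must already have unbound the object from every address space.
 */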
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)

	if (obj->pages_pin_count)

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	list_del(&obj->global_list);

	if (is_vmalloc_addr(obj->mapping))
		vunmap(obj->mapping);
		kunmap(kmap_to_page(obj->mapping));
	obj->mapping = NULL;

	ops->put_pages(obj);

	i915_gem_object_invalidate(obj);
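/* Allocate the shmem backing pages and assemble them into a sg_table,
 * coalescing physically contiguous pages into single scatterlist entries
 * where possible and leaning on the shrinker when allocation fails.
 */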
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	unsigned long last_pfn = 0;	/* suppress gcc warning */

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 * Fail silently without starting the shrinker
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			i915_gem_shrink(dev_priv,
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);

			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
				ret = PTR_ERR(page);
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			sg_set_page(sg, page, PAGE_SIZE, 0);
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg->length += PAGE_SIZE;
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())

	ret = i915_gem_gtt_prepare_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	for_each_sgt_page(page, sgt_iter, st)

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;

	/* A single page can always be kmapped */
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);

	for_each_sgt_page(page, sgt_iter, sgt)

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);

	if (pages != stack_pages)
		drm_free_large(pages);
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	if (!obj->mapping) {
		obj->mapping = i915_gem_object_map(obj);
		if (!obj->mapping) {
			i915_gem_object_unpin_pages(obj);
			return ERR_PTR(-ENOMEM);

	return obj->mapping;
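/* Retirement callbacks, run when the request tracking a read or write by the
 * GPU completes: drop the engine's reference on the object and, for writes,
 * flush the frontbuffer tracking.
 */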
i915_gem_object_retire__write(struct i915_gem_active *active,
			      struct drm_i915_gem_request *request)
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);

i915_gem_object_retire__read(struct i915_gem_active *active,
			     struct drm_i915_gem_request *request)
	int idx = request->engine->id;
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_read[idx]);

	GEM_BUG_ON((obj->active & (1 << idx)) == 0);

	obj->active &= ~(1 << idx);

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	if (obj->bind_count)
		list_move_tail(&obj->global_list,
			       &request->i915->mm.bound_list);

	i915_gem_object_put(obj);
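/* Hang banning: a context that causes hangs in quick succession (within its
 * configurable ban period) is banned from submitting further work.
 */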
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
	unsigned long elapsed;

	if (ctx->hang_stats.banned)

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");

static void i915_set_reset_status(struct i915_gem_context *ctx,
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	hs->banned = i915_context_is_banned(ctx);
	hs->guilty_ts = get_seconds();
	hs->batch_pending++;
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	list_for_each_entry(request, &engine->request_list, link) {
		if (i915_gem_request_completed(request))

static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
	struct drm_i915_gem_request *request;

	request = i915_gem_find_active_request(engine);
	if (request == NULL)

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(request->ctx, ring_hung);
	list_for_each_entry_continue(request, &engine->request_list, link)
		i915_set_reset_status(request->ctx, false);
2455 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2457 struct intel_ring *ring;
2459 /* Mark all pending requests as complete so that any concurrent
2460 * (lockless) lookup doesn't try and wait upon the request as we
2463 intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2466 * Clear the execlists queue up before freeing the requests, as those
2467 * are the ones that keep the context and ringbuffer backing objects
2471 if (i915.enable_execlists) {
2472 /* Ensure irq handler finishes or is cancelled. */
2473 tasklet_kill(&engine->irq_tasklet);
2475 intel_execlists_cancel_requests(engine);
2479 * We must free the requests after all the corresponding objects have
2480 * been moved off active lists, which is the same order as the normal
2481 * retire_requests function uses. This is important if objects hold
2482 * implicit references on things like e.g. ppgtt address spaces through
2485 if (!list_empty(&engine->request_list)) {
2486 struct drm_i915_gem_request *request;
2488 request = list_last_entry(&engine->request_list,
2489 struct drm_i915_gem_request,
2492 i915_gem_request_retire_upto(request);
2495 /* Having flushed all requests from all queues, we know that all
2496 * ringbuffers must now be empty. However, since we do not reclaim
2497 * all space when retiring the request (to prevent HEADs colliding
2498 * with rapid ringbuffer wraparound) the amount of available space
2499 * upon reset is less than when we start. Do one more pass over
2500 * all the ringbuffers to reset last_retired_head.
2502 list_for_each_entry(ring, &engine->buffers, link) {
2503 ring->last_retired_head = ring->tail;
2504 intel_ring_update_space(ring);
2507 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2510 void i915_gem_reset(struct drm_device *dev)
2512 struct drm_i915_private *dev_priv = to_i915(dev);
2513 struct intel_engine_cs *engine;
2516 * Before we free the objects from the requests, we need to inspect
2517 * them for finding the guilty party. As the requests only borrow
2518 * their reference to the objects, the inspection must be done first.
2520 for_each_engine(engine, dev_priv)
2521 i915_gem_reset_engine_status(engine);
2523 for_each_engine(engine, dev_priv)
2524 i915_gem_reset_engine_cleanup(engine);
2525 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2527 i915_gem_context_reset(dev);
2529 i915_gem_restore_fences(dev);
2533 i915_gem_retire_work_handler(struct work_struct *work)
2535 struct drm_i915_private *dev_priv =
2536 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2537 struct drm_device *dev = &dev_priv->drm;
2539 /* Come back later if the device is busy... */
2540 if (mutex_trylock(&dev->struct_mutex)) {
2541 i915_gem_retire_requests(dev_priv);
2542 mutex_unlock(&dev->struct_mutex);
2545 /* Keep the retire handler running until we are finally idle.
2546 * We do not need to do this test under locking as in the worst-case
2547 * we queue the retire worker once too often.
2549 if (READ_ONCE(dev_priv->gt.awake)) {
2550 i915_queue_hangcheck(dev_priv);
2551 queue_delayed_work(dev_priv->wq,
2552 &dev_priv->gt.retire_work,
2553 round_jiffies_up_relative(HZ));
2558 i915_gem_idle_work_handler(struct work_struct *work)
2560 struct drm_i915_private *dev_priv =
2561 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2562 struct drm_device *dev = &dev_priv->drm;
2563 struct intel_engine_cs *engine;
2564 unsigned int stuck_engines;
2565 bool rearm_hangcheck;
2567 if (!READ_ONCE(dev_priv->gt.awake))
2570 if (READ_ONCE(dev_priv->gt.active_engines))
2574 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2576 if (!mutex_trylock(&dev->struct_mutex)) {
2577 /* Currently busy, come back later */
2578 mod_delayed_work(dev_priv->wq,
2579 &dev_priv->gt.idle_work,
2580 msecs_to_jiffies(50));
2584 if (dev_priv->gt.active_engines)
2587 for_each_engine(engine, dev_priv)
2588 i915_gem_batch_pool_fini(&engine->batch_pool);
2590 GEM_BUG_ON(!dev_priv->gt.awake);
2591 dev_priv->gt.awake = false;
2592 rearm_hangcheck = false;
2594 /* As we have disabled hangcheck, we need to unstick any waiters still
2595 * hanging around. However, as we may be racing against the interrupt
2596 * handler or the waiters themselves, we skip enabling the fake-irq.
2598 stuck_engines = intel_kick_waiters(dev_priv);
2599 if (unlikely(stuck_engines))
2600 DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
2603 if (INTEL_GEN(dev_priv) >= 6)
2604 gen6_rps_idle(dev_priv);
2605 intel_runtime_pm_put(dev_priv);
2607 mutex_unlock(&dev->struct_mutex);
2610 if (rearm_hangcheck) {
2611 GEM_BUG_ON(!dev_priv->gt.awake);
2612 i915_queue_hangcheck(dev_priv);
2616 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2618 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2619 struct drm_i915_file_private *fpriv = file->driver_priv;
2620 struct i915_vma *vma, *vn;
2622 mutex_lock(&obj->base.dev->struct_mutex);
2623 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2624 if (vma->vm->file == fpriv)
2625 i915_vma_close(vma);
2626 mutex_unlock(&obj->base.dev->struct_mutex);
2630 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2631 * @dev: drm device pointer
2632 * @data: ioctl data blob
2633 * @file: drm file pointer
2635 * Returns 0 if successful, else an error is returned with the remaining time in
2636 * the timeout parameter.
2637 * -ETIME: object is still busy after timeout
2638 * -ERESTARTSYS: signal interrupted the wait
2639 * -ENOENT: object doesn't exist
2640 * Also possible, but rare:
2641 * -EAGAIN: GPU wedged
2643 * -ENODEV: Internal IRQ fail
2644 * -E?: The add request failed
2646 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2647 * non-zero timeout parameter the wait ioctl will wait for the given number of
2648 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2649 * without holding struct_mutex the object may become re-busied before this
2650 function completes. A similar but shorter race condition exists in the busy ioctl.
2654 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2656 struct drm_i915_gem_wait *args = data;
2657 struct drm_i915_gem_object *obj;
2658 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
2662 if (args->flags != 0)
2665 ret = i915_mutex_lock_interruptible(dev);
2669 obj = i915_gem_object_lookup(file, args->bo_handle);
2671 mutex_unlock(&dev->struct_mutex);
2678 for (i = 0; i < I915_NUM_ENGINES; i++) {
2679 struct drm_i915_gem_request *req;
2681 req = i915_gem_active_get(&obj->last_read[i],
2682 &obj->base.dev->struct_mutex);
2684 requests[n++] = req;
2688 i915_gem_object_put(obj);
2689 mutex_unlock(&dev->struct_mutex);
2691 for (i = 0; i < n; i++) {
2693 ret = i915_wait_request(requests[i], true,
2694 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2695 to_rps_client(file));
2696 i915_gem_request_put(requests[i]);
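/*
 * Illustrative userspace sketch (assumes the libdrm drmIoctl() wrapper; fd
 * and handle are placeholders):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100000000,
 *	};
 *	err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On -ETIME the object is still busy and wait.timeout_ns holds the remaining
 * time; a timeout of zero turns the call into the non-blocking busy query
 * described in the comment above.
 */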
2702 __i915_gem_object_sync(struct drm_i915_gem_request *to,
2703 struct drm_i915_gem_request *from)
2707 if (to->engine == from->engine)
2710 if (!i915.semaphores) {
2711 ret = i915_wait_request(from,
2712 from->i915->mm.interruptible,
2718 int idx = intel_engine_sync_index(from->engine, to->engine);
2719 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
2722 trace_i915_gem_ring_sync_to(to, from);
2723 ret = to->engine->semaphore.sync_to(to, from);
2727 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
2734 * i915_gem_object_sync - sync an object to a ring.
2736 * @obj: object which may be in use on another ring.
2737 * @to: request we are wishing to use
2739 * This code is meant to abstract object synchronization with the GPU.
2740 * Conceptually we serialise writes between engines inside the GPU.
2741 * We only allow one engine to write into a buffer at any time, but
2742 * multiple readers. To ensure each has a coherent view of memory, we must:
2744 * - If there is an outstanding write request to the object, the new
2745 * request must wait for it to complete (either CPU or in hw, requests
2746 * on the same ring will be naturally ordered).
2748 * - If we are a write request (pending_write_domain is set), the new
2749 * request must wait for outstanding read requests to complete.
2751 * Returns 0 if successful, else propagates up the lower layer error.
2754 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2755 struct drm_i915_gem_request *to)
2757 struct i915_gem_active *active;
2758 unsigned long active_mask;
2761 lockdep_assert_held(&obj->base.dev->struct_mutex);
2763 active_mask = obj->active;
2767 if (obj->base.pending_write_domain) {
2768 active = obj->last_read;
2771 active = &obj->last_write;
2774 for_each_active(active_mask, idx) {
2775 struct drm_i915_gem_request *request;
2778 request = i915_gem_active_peek(&active[idx],
2779 &obj->base.dev->struct_mutex);
2783 ret = __i915_gem_object_sync(to, request);
2791 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2793 u32 old_write_domain, old_read_domains;
2795 /* Force a pagefault for domain tracking on next user access */
2796 i915_gem_release_mmap(obj);
2798 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2801 old_read_domains = obj->base.read_domains;
2802 old_write_domain = obj->base.write_domain;
2804 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2805 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2807 trace_i915_gem_object_change_domain(obj,
2812 static void __i915_vma_iounmap(struct i915_vma *vma)
2814 GEM_BUG_ON(i915_vma_is_pinned(vma));
2816 if (vma->iomap == NULL)
2819 io_mapping_unmap(vma->iomap);
2823 int i915_vma_unbind(struct i915_vma *vma)
2825 struct drm_i915_gem_object *obj = vma->obj;
2826 unsigned long active;
2829 /* First wait upon any activity as retiring the request may
2830 * have side-effects such as unpinning or even unbinding this vma.
2832 active = i915_vma_get_active(vma);
2836 /* When a closed VMA is retired, it is unbound - eek.
2837 * In order to prevent it from being recursively closed,
2838 * take a pin on the vma so that the second unbind is
2841 __i915_vma_pin(vma);
2843 for_each_active(active, idx) {
2844 ret = i915_gem_active_retire(&vma->last_read[idx],
2845 &vma->vm->dev->struct_mutex);
2850 __i915_vma_unpin(vma);
2854 GEM_BUG_ON(i915_vma_is_active(vma));
2857 if (i915_vma_is_pinned(vma))
2860 if (!drm_mm_node_allocated(&vma->node))
2863 GEM_BUG_ON(obj->bind_count == 0);
2864 GEM_BUG_ON(!obj->pages);
2866 if (i915_vma_is_ggtt(vma) &&
2867 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2868 i915_gem_object_finish_gtt(obj);
2870 /* release the fence reg _after_ flushing */
2871 ret = i915_gem_object_put_fence(obj);
2875 __i915_vma_iounmap(vma);
2878 if (likely(!vma->vm->closed)) {
2879 trace_i915_vma_unbind(vma);
2880 vma->vm->unbind_vma(vma);
2882 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
2884 drm_mm_remove_node(&vma->node);
2885 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2887 if (i915_vma_is_ggtt(vma)) {
2888 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2889 obj->map_and_fenceable = false;
2890 } else if (vma->ggtt_view.pages) {
2891 sg_free_table(vma->ggtt_view.pages);
2892 kfree(vma->ggtt_view.pages);
2894 vma->ggtt_view.pages = NULL;
2897 /* Since the unbound list is global, only move to that list if
2898 * no more VMAs exist. */
2899 if (--obj->bind_count == 0)
2900 list_move_tail(&obj->global_list,
2901 &to_i915(obj->base.dev)->mm.unbound_list);
2903 /* And finally now the object is completely decoupled from this vma,
2904 * we can drop its hold on the backing storage and allow it to be
2905 * reaped by the shrinker.
2907 i915_gem_object_unpin_pages(obj);
2910 if (unlikely(i915_vma_is_closed(vma)))
2911 i915_vma_destroy(vma);
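/*
 * Note: the teardown order in i915_vma_unbind() matters: first wait for and
 * retire any outstanding activity, then flush the GTT domain and release the
 * fence before the iounmap, only then call vm->unbind_vma() and return the
 * drm_mm node, and finally drop the page pin so that the shrinker may reap
 * the backing storage once the last VMA is gone.
 */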
2916 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
2918 struct intel_engine_cs *engine;
2921 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2923 for_each_engine(engine, dev_priv) {
2924 if (engine->last_context == NULL)
2927 ret = intel_engine_idle(engine);
2935 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2936 unsigned long cache_level)
2938 struct drm_mm_node *gtt_space = &vma->node;
2939 struct drm_mm_node *other;
2942 * On some machines we have to be careful when putting differing types
2943 * of snoopable memory together to avoid the prefetcher crossing memory
2944 * domains and dying. During vm initialisation, we decide whether or not
2945 * these constraints apply and set the drm_mm.color_adjust appropriately.
2948 if (vma->vm->mm.color_adjust == NULL)
2951 if (!drm_mm_node_allocated(gtt_space))
2954 if (list_empty(&gtt_space->node_list))
2957 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2958 if (other->allocated && !other->hole_follows && other->color != cache_level)
2961 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2962 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
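/*
 * Note: with a color_adjust callback installed, the two checks above reject
 * placements where a node of a different cache "colour" directly abuts this
 * one with no guard hole in between, e.g. a snooped buffer packed right
 * against an uncached scanout buffer, which is exactly the prefetcher hazard
 * described at the top of this function.
 */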
2969 * i915_vma_insert - finds a slot for the vma in its address space
2971 * @size: requested size in bytes (can be larger than the VMA)
2972 * @alignment: required alignment
2973 * @flags: mask of PIN_* flags to use
2975 * First we try to allocate some free space that meets the requirements for
2976 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
2977 * preferably the oldest idle entry to make room for the new VMA.
2980 * 0 on success, negative error code otherwise.
2983 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
2985 struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
2986 struct drm_i915_gem_object *obj = vma->obj;
2991 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
2992 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
2994 size = max(size, vma->size);
2995 if (flags & PIN_MAPPABLE)
2996 size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
2999 i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
3000 flags & PIN_MAPPABLE);
3002 alignment = min_alignment;
3003 if (alignment & (min_alignment - 1)) {
3004 DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
3005 alignment, min_alignment);
3009 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3011 end = vma->vm->total;
3012 if (flags & PIN_MAPPABLE)
3013 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
3014 if (flags & PIN_ZONE_4G)
3015 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3017 /* If binding the object/GGTT view requires more space than the entire
3018 * aperture has, reject it early before evicting everything in a vain
3019 * attempt to find space.
3022 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3023 size, obj->base.size,
3024 flags & PIN_MAPPABLE ? "mappable" : "total",
3029 ret = i915_gem_object_get_pages(obj);
3033 i915_gem_object_pin_pages(obj);
3035 if (flags & PIN_OFFSET_FIXED) {
3036 u64 offset = flags & PIN_OFFSET_MASK;
3037 if (offset & (alignment - 1) || offset > end - size) {
3042 vma->node.start = offset;
3043 vma->node.size = size;
3044 vma->node.color = obj->cache_level;
3045 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3047 ret = i915_gem_evict_for_vma(vma);
3049 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3054 u32 search_flag, alloc_flag;
3056 if (flags & PIN_HIGH) {
3057 search_flag = DRM_MM_SEARCH_BELOW;
3058 alloc_flag = DRM_MM_CREATE_TOP;
3060 search_flag = DRM_MM_SEARCH_DEFAULT;
3061 alloc_flag = DRM_MM_CREATE_DEFAULT;
3064 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3065 * so we know that we always have a minimum alignment of 4096.
3066 * The drm_mm range manager is optimised to return results
3067 * with zero alignment, so where possible use the optimal path.
3070 if (alignment <= 4096)
3074 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3082 ret = i915_gem_evict_something(vma->vm, size, alignment,
3092 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3094 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3095 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3101 i915_gem_object_unpin_pages(obj);
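/*
 * Illustrative pin-flag usage (a sketch only, not a call made in this file):
 *
 *	ret = i915_vma_insert(vma, size, 0, PIN_MAPPABLE);
 *
 * PIN_MAPPABLE clamps the search to the CPU-visible aperture
 * (ggtt.mappable_end), PIN_ZONE_4G clamps it below 4GiB, PIN_HIGH searches
 * top-down, PIN_OFFSET_BIAS imposes a lower bound taken from
 * PIN_OFFSET_MASK, and PIN_OFFSET_FIXED reserves exactly that offset,
 * evicting any conflicting VMA first.
 */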
3106 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3109 /* If we don't have a page list set up, then we're not pinned
3110 * to GPU, and we can ignore the cache flush because it'll happen
3111 * again at bind time.
3113 if (obj->pages == NULL)
3117 * Stolen memory is always coherent with the GPU as it is explicitly
3118 * marked as wc by the system, or the system is cache-coherent.
3120 if (obj->stolen || obj->phys_handle)
3123 /* If the GPU is snooping the contents of the CPU cache,
3124 * we do not need to manually clear the CPU cache lines. However,
3125 * the caches are only snooped when the render cache is
3126 * flushed/invalidated. As we always have to emit invalidations
3127 * and flushes when moving into and out of the RENDER domain, correct
3128 * snooping behaviour occurs naturally as the result of our domain tracking.
3131 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3132 obj->cache_dirty = true;
3136 trace_i915_gem_object_clflush(obj);
3137 drm_clflush_sg(obj->pages);
3138 obj->cache_dirty = false;
3143 /** Flushes the GTT write domain for the object if it's dirty. */
3145 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3147 uint32_t old_write_domain;
3149 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3152 /* No actual flushing is required for the GTT write domain. Writes
3153 * to it immediately go to main memory as far as we know, so there's
3154 * no chipset flush. It also doesn't land in render cache.
3156 * However, we do have to enforce the order so that all writes through
3157 * the GTT land before any writes to the device, such as updates to the GATT itself.
3162 old_write_domain = obj->base.write_domain;
3163 obj->base.write_domain = 0;
3165 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3167 trace_i915_gem_object_change_domain(obj,
3168 obj->base.read_domains,
3172 /** Flushes the CPU write domain for the object if it's dirty. */
3174 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3176 uint32_t old_write_domain;
3178 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3181 if (i915_gem_clflush_object(obj, obj->pin_display))
3182 i915_gem_chipset_flush(to_i915(obj->base.dev));
3184 old_write_domain = obj->base.write_domain;
3185 obj->base.write_domain = 0;
3187 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3189 trace_i915_gem_object_change_domain(obj,
3190 obj->base.read_domains,
3195 * Moves a single object to the GTT read, and possibly write domain.
3196 * @obj: object to act on
3197 * @write: ask for write access or read only
3199 * This function returns when the move is complete, including waiting on flushes to occur.
3203 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3205 uint32_t old_write_domain, old_read_domains;
3206 struct i915_vma *vma;
3209 ret = i915_gem_object_wait_rendering(obj, !write);
3213 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3216 /* Flush and acquire obj->pages so that we are coherent through
3217 * direct access in memory with previous cached writes through
3218 * shmemfs and that our cache domain tracking remains valid.
3219 * For example, if the obj->filp was moved to swap without us
3220 * being notified and releasing the pages, we would mistakenly
3221 * continue to assume that the obj remained out of the CPU cached domain.
3224 ret = i915_gem_object_get_pages(obj);
3228 i915_gem_object_flush_cpu_write_domain(obj);
3230 /* Serialise direct access to this object with the barriers for
3231 * coherent writes from the GPU, by effectively invalidating the
3232 * GTT domain upon first access.
3234 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3237 old_write_domain = obj->base.write_domain;
3238 old_read_domains = obj->base.read_domains;
3240 /* It should now be out of any other write domains, and we can update
3241 * the domain values for our changes.
3243 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3244 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3246 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3247 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3251 trace_i915_gem_object_change_domain(obj,
3255 /* And bump the LRU for this access */
3256 vma = i915_gem_obj_to_ggtt(obj);
3258 drm_mm_node_allocated(&vma->node) &&
3259 !i915_vma_is_active(vma))
3260 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3266 * Changes the cache-level of an object across all VMA.
3267 * @obj: object to act on
3268 * @cache_level: new cache level to set for the object
3270 * After this function returns, the object will be in the new cache-level
3271 * across all GTT and the contents of the backing storage will be coherent,
3272 * with respect to the new cache-level. In order to keep the backing storage
3273 * coherent for all users, we only allow a single cache level to be set
3274 * globally on the object and prevent it from being changed whilst the
3275 * hardware is reading from the object. That is if the object is currently
3276 * on the scanout it will be set to uncached (or equivalent display
3277 * cache coherency) and all non-MOCS GPU access will also be uncached so
3278 * that all direct access to the scanout remains coherent.
3280 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3281 enum i915_cache_level cache_level)
3283 struct i915_vma *vma;
3286 if (obj->cache_level == cache_level)
3289 /* Inspect the list of currently bound VMA and unbind any that would
3290 * be invalid given the new cache-level. This is principally to
3291 * catch the issue of the CS prefetch crossing page boundaries and
3292 * reading an invalid PTE on older architectures.
3295 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3296 if (!drm_mm_node_allocated(&vma->node))
3299 if (i915_vma_is_pinned(vma)) {
3300 DRM_DEBUG("can not change the cache level of pinned objects\n");
3304 if (i915_gem_valid_gtt_space(vma, cache_level))
3307 ret = i915_vma_unbind(vma);
3311 /* As unbinding may affect other elements in the
3312 * obj->vma_list (due to side-effects from retiring
3313 * an active vma), play safe and restart the iterator.
3318 /* We can reuse the existing drm_mm nodes but need to change the
3319 * cache-level on the PTE. We could simply unbind them all and
3320 * rebind with the correct cache-level on next use. However since
3321 * we already have a valid slot, dma mapping, pages etc, we may as well
3322 * rewrite the PTE in the belief that doing so tramples upon less
3323 * state and so involves less work.
3325 if (obj->bind_count) {
3326 /* Before we change the PTE, the GPU must not be accessing it.
3327 * If we wait upon the object, we know that all the bound
3328 * VMA are no longer active.
3330 ret = i915_gem_object_wait_rendering(obj, false);
3334 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3335 /* Access to snoopable pages through the GTT is
3336 * incoherent and on some machines causes a hard
3337 * lockup. Relinquish the CPU mmapping to force
3338 * userspace to refault in the pages and we can
3339 * then double check if the GTT mapping is still
3340 * valid for that pointer access.
3342 i915_gem_release_mmap(obj);
3344 /* As we no longer need a fence for GTT access,
3345 * we can relinquish it now (and so prevent having
3346 * to steal a fence from someone else on the next
3347 * fence request). Note GPU activity would have
3348 * dropped the fence as all snoopable access is
3349 * supposed to be linear.
3351 ret = i915_gem_object_put_fence(obj);
3355 /* We either have incoherent backing store and
3356 * so no GTT access or the architecture is fully
3357 * coherent. In such cases, existing GTT mmaps
3358 * ignore the cache bit in the PTE and we can
3359 * rewrite it without confusing the GPU or having
3360 * to force userspace to fault back in its mmaps.
3364 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3365 if (!drm_mm_node_allocated(&vma->node))
3368 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3374 list_for_each_entry(vma, &obj->vma_list, obj_link)
3375 vma->node.color = cache_level;
3376 obj->cache_level = cache_level;
3379 /* Flush the dirty CPU caches to the backing storage so that the
3380 * object is now coherent at its new cache level (with respect
3381 * to the access domain).
3383 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3384 if (i915_gem_clflush_object(obj, true))
3385 i915_gem_chipset_flush(to_i915(obj->base.dev));
3391 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3392 struct drm_file *file)
3394 struct drm_i915_gem_caching *args = data;
3395 struct drm_i915_gem_object *obj;
3397 obj = i915_gem_object_lookup(file, args->handle);
3401 switch (obj->cache_level) {
3402 case I915_CACHE_LLC:
3403 case I915_CACHE_L3_LLC:
3404 args->caching = I915_CACHING_CACHED;
3408 args->caching = I915_CACHING_DISPLAY;
3412 args->caching = I915_CACHING_NONE;
3416 i915_gem_object_put_unlocked(obj);
3420 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3421 struct drm_file *file)
3423 struct drm_i915_private *dev_priv = to_i915(dev);
3424 struct drm_i915_gem_caching *args = data;
3425 struct drm_i915_gem_object *obj;
3426 enum i915_cache_level level;
3429 switch (args->caching) {
3430 case I915_CACHING_NONE:
3431 level = I915_CACHE_NONE;
3433 case I915_CACHING_CACHED:
3435 * Due to a HW issue on BXT A stepping, GPU stores via a
3436 * snooped mapping may leave stale data in a corresponding CPU
3437 * cacheline, whereas normally such cachelines would get invalidated.
3440 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3443 level = I915_CACHE_LLC;
3445 case I915_CACHING_DISPLAY:
3446 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3452 intel_runtime_pm_get(dev_priv);
3454 ret = i915_mutex_lock_interruptible(dev);
3458 obj = i915_gem_object_lookup(file, args->handle);
3464 ret = i915_gem_object_set_cache_level(obj, level);
3466 i915_gem_object_put(obj);
3468 mutex_unlock(&dev->struct_mutex);
3470 intel_runtime_pm_put(dev_priv);
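/*
 * Illustrative userspace sketch (assumes the libdrm drmIoctl() wrapper; fd
 * and handle are placeholders):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * which funnels into i915_gem_object_set_cache_level() above:
 * I915_CACHING_NONE maps to I915_CACHE_NONE, I915_CACHING_CACHED to
 * I915_CACHE_LLC, and I915_CACHING_DISPLAY to I915_CACHE_WT on
 * write-through capable hardware (I915_CACHE_NONE otherwise).
 */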
3476 * Prepare buffer for display plane (scanout, cursors, etc).
3477 * Can be called from an uninterruptible phase (modesetting) and allows
3478 * any flushes to be pipelined (for pageflips).
3481 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3483 const struct i915_ggtt_view *view)
3485 u32 old_read_domains, old_write_domain;
3488 /* Mark the pin_display early so that we account for the
3489 * display coherency whilst setting up the cache domains.
3493 /* The display engine is not coherent with the LLC cache on gen6. As
3494 * a result, we make sure that the pinning that is about to occur is
3495 * done with uncached PTEs. This is the lowest common denominator for all chipsets.
3498 * However for gen6+, we could do better by using the GFDT bit instead
3499 * of uncaching, which would allow us to flush all the LLC-cached data
3500 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3502 ret = i915_gem_object_set_cache_level(obj,
3503 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3505 goto err_unpin_display;
3507 /* As the user may map the buffer once pinned in the display plane
3508 * (e.g. libkms for the bootup splash), we have to ensure that we
3509 * always use map_and_fenceable for all scanout buffers.
3511 ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3512 view->type == I915_GGTT_VIEW_NORMAL ?
3515 goto err_unpin_display;
3517 i915_gem_object_flush_cpu_write_domain(obj);
3519 old_write_domain = obj->base.write_domain;
3520 old_read_domains = obj->base.read_domains;
3522 /* It should now be out of any other write domains, and we can update
3523 * the domain values for our changes.
3525 obj->base.write_domain = 0;
3526 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3528 trace_i915_gem_object_change_domain(obj,
3540 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3541 const struct i915_ggtt_view *view)
3543 if (WARN_ON(obj->pin_display == 0))
3546 i915_gem_object_ggtt_unpin_view(obj, view);
3552 * Moves a single object to the CPU read, and possibly write domain.
3553 * @obj: object to act on
3554 * @write: requesting write or read-only access
3556 * This function returns when the move is complete, including waiting on flushes to occur.
3560 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3562 uint32_t old_write_domain, old_read_domains;
3565 ret = i915_gem_object_wait_rendering(obj, !write);
3569 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3572 i915_gem_object_flush_gtt_write_domain(obj);
3574 old_write_domain = obj->base.write_domain;
3575 old_read_domains = obj->base.read_domains;
3577 /* Flush the CPU cache if it's still invalid. */
3578 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3579 i915_gem_clflush_object(obj, false);
3581 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3584 /* It should now be out of any other write domains, and we can update
3585 * the domain values for our changes.
3587 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3589 /* If we're writing through the CPU, then the GPU read domains will
3590 * need to be invalidated at next use.
3593 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3594 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3597 trace_i915_gem_object_change_domain(obj,
3604 /* Throttle our rendering by waiting until the ring has completed our requests
3605 * emitted over 20 msec ago.
3607 * Note that if we were to use the current jiffies each time around the loop,
3608 * we wouldn't escape the function with any frames outstanding if the time to
3609 * render a frame was over 20ms.
3611 * This should get us reasonable parallelism between CPU and GPU but also
3612 * relatively low latency when blocking on a particular request to finish.
3615 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3617 struct drm_i915_private *dev_priv = to_i915(dev);
3618 struct drm_i915_file_private *file_priv = file->driver_priv;
3619 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3620 struct drm_i915_gem_request *request, *target = NULL;
3623 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3627 /* ABI: return -EIO if already wedged */
3628 if (i915_terminally_wedged(&dev_priv->gpu_error))
3631 spin_lock(&file_priv->mm.lock);
3632 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3633 if (time_after_eq(request->emitted_jiffies, recent_enough))
3637 * Note that the request might not have been submitted yet.
3638 * In which case emitted_jiffies will be zero.
3640 if (!request->emitted_jiffies)
3646 i915_gem_request_get(target);
3647 spin_unlock(&file_priv->mm.lock);
3652 ret = i915_wait_request(target, true, NULL, NULL);
3653 i915_gem_request_put(target);
3659 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3661 struct drm_i915_gem_object *obj = vma->obj;
3663 if (!drm_mm_node_allocated(&vma->node))
3666 if (vma->node.size < size)
3669 if (alignment && vma->node.start & (alignment - 1))
3672 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3675 if (flags & PIN_OFFSET_BIAS &&
3676 vma->node.start < (flags & PIN_OFFSET_MASK))
3679 if (flags & PIN_OFFSET_FIXED &&
3680 vma->node.start != (flags & PIN_OFFSET_MASK))
3686 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3688 struct drm_i915_gem_object *obj = vma->obj;
3689 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3690 bool mappable, fenceable;
3691 u32 fence_size, fence_alignment;
3693 fence_size = i915_gem_get_ggtt_size(dev_priv,
3696 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3701 fenceable = (vma->node.size == fence_size &&
3702 (vma->node.start & (fence_alignment - 1)) == 0);
3704 mappable = (vma->node.start + fence_size <=
3705 dev_priv->ggtt.mappable_end);
3707 obj->map_and_fenceable = mappable && fenceable;
3710 int __i915_vma_do_pin(struct i915_vma *vma,
3711 u64 size, u64 alignment, u64 flags)
3713 unsigned int bound = vma->flags;
3716 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
3717 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
3719 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
3724 if ((bound & I915_VMA_BIND_MASK) == 0) {
3725 ret = i915_vma_insert(vma, size, alignment, flags);
3730 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
3734 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
3735 __i915_vma_set_map_and_fenceable(vma);
3737 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3741 __i915_vma_unpin(vma);
3746 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3747 const struct i915_ggtt_view *view,
3752 struct i915_vma *vma;
3756 view = &i915_ggtt_view_normal;
3758 vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
3760 return PTR_ERR(vma);
3762 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3763 if (flags & PIN_NONBLOCK &&
3764 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3767 WARN(i915_vma_is_pinned(vma),
3768 "bo is already pinned in ggtt with incorrect alignment:"
3769 " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
3770 " obj->map_and_fenceable=%d\n",
3771 upper_32_bits(vma->node.start),
3772 lower_32_bits(vma->node.start),
3774 !!(flags & PIN_MAPPABLE),
3775 obj->map_and_fenceable);
3776 ret = i915_vma_unbind(vma);
3781 return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3785 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3786 const struct i915_ggtt_view *view)
3788 i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
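/*
 * Illustrative pairing (a sketch only): a caller that pins an object into
 * the GGTT for a particular view releases it with the matching unpin, e.g.:
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &view, 0, alignment, PIN_MAPPABLE);
 *	...
 *	i915_gem_object_ggtt_unpin_view(obj, &view);
 */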
3792 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3793 struct drm_file *file)
3795 struct drm_i915_gem_busy *args = data;
3796 struct drm_i915_gem_object *obj;
3799 ret = i915_mutex_lock_interruptible(dev);
3803 obj = i915_gem_object_lookup(file, args->handle);
3809 /* Count all active objects as busy, even if they are currently not used
3810 * by the gpu. Users of this interface expect objects to eventually
3811 * become non-busy without any further actions.
3815 struct drm_i915_gem_request *req;
3818 for (i = 0; i < I915_NUM_ENGINES; i++) {
3819 req = i915_gem_active_peek(&obj->last_read[i],
3820 &obj->base.dev->struct_mutex);
3822 args->busy |= 1 << (16 + req->engine->exec_id);
3824 req = i915_gem_active_peek(&obj->last_write,
3825 &obj->base.dev->struct_mutex);
3827 args->busy |= req->engine->exec_id;
3830 i915_gem_object_put(obj);
3832 mutex_unlock(&dev->struct_mutex);
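/*
 * Note: the args->busy encoding produced above places the exec_id of the
 * engine holding the last outstanding write in the low 16 bits, and a mask
 * of engines with outstanding reads in the bits above (1 << (16 + exec_id));
 * a value of zero therefore means the object is idle.
 */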
3837 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3838 struct drm_file *file_priv)
3840 return i915_gem_ring_throttle(dev, file_priv);
3844 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3845 struct drm_file *file_priv)
3847 struct drm_i915_private *dev_priv = to_i915(dev);
3848 struct drm_i915_gem_madvise *args = data;
3849 struct drm_i915_gem_object *obj;
3852 switch (args->madv) {
3853 case I915_MADV_DONTNEED:
3854 case I915_MADV_WILLNEED:
3860 ret = i915_mutex_lock_interruptible(dev);
3864 obj = i915_gem_object_lookup(file_priv, args->handle);
3870 if (i915_gem_obj_is_pinned(obj)) {
3876 obj->tiling_mode != I915_TILING_NONE &&
3877 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3878 if (obj->madv == I915_MADV_WILLNEED)
3879 i915_gem_object_unpin_pages(obj);
3880 if (args->madv == I915_MADV_WILLNEED)
3881 i915_gem_object_pin_pages(obj);
3884 if (obj->madv != __I915_MADV_PURGED)
3885 obj->madv = args->madv;
3887 /* if the object is no longer attached, discard its backing storage */
3888 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
3889 i915_gem_object_truncate(obj);
3891 args->retained = obj->madv != __I915_MADV_PURGED;
3894 i915_gem_object_put(obj);
3896 mutex_unlock(&dev->struct_mutex);
3900 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3901 const struct drm_i915_gem_object_ops *ops)
3905 INIT_LIST_HEAD(&obj->global_list);
3906 for (i = 0; i < I915_NUM_ENGINES; i++)
3907 init_request_active(&obj->last_read[i],
3908 i915_gem_object_retire__read);
3909 init_request_active(&obj->last_write,
3910 i915_gem_object_retire__write);
3911 init_request_active(&obj->last_fence, NULL);
3912 INIT_LIST_HEAD(&obj->obj_exec_link);
3913 INIT_LIST_HEAD(&obj->vma_list);
3914 INIT_LIST_HEAD(&obj->batch_pool_link);
3918 obj->fence_reg = I915_FENCE_REG_NONE;
3919 obj->madv = I915_MADV_WILLNEED;
3921 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3924 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3925 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
3926 .get_pages = i915_gem_object_get_pages_gtt,
3927 .put_pages = i915_gem_object_put_pages_gtt,
3930 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
3933 struct drm_i915_gem_object *obj;
3934 struct address_space *mapping;
3938 obj = i915_gem_object_alloc(dev);
3940 return ERR_PTR(-ENOMEM);
3942 ret = drm_gem_object_init(dev, &obj->base, size);
3946 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3947 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3948 /* 965gm cannot relocate objects above 4GiB. */
3949 mask &= ~__GFP_HIGHMEM;
3950 mask |= __GFP_DMA32;
3953 mapping = file_inode(obj->base.filp)->i_mapping;
3954 mapping_set_gfp_mask(mapping, mask);
3956 i915_gem_object_init(obj, &i915_gem_object_ops);
3958 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3959 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3962 /* On some devices, we can have the GPU use the LLC (the CPU
3963 * cache) for about a 10% performance improvement
3964 * compared to uncached. Graphics requests other than
3965 * display scanout are coherent with the CPU in
3966 * accessing this cache. This means in this mode we
3967 * don't need to clflush on the CPU side, and on the
3968 * GPU side we only need to flush internal caches to
3969 * get data visible to the CPU.
3971 * However, we maintain the display planes as UC, and so
3972 * need to rebind when first used as such.
3974 obj->cache_level = I915_CACHE_LLC;
3976 obj->cache_level = I915_CACHE_NONE;
3978 trace_i915_gem_object_create(obj);
3983 i915_gem_object_free(obj);
3985 return ERR_PTR(ret);
3988 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
3990 /* If we are the last user of the backing storage (be it shmemfs
3991 * pages or stolen etc), we know that the pages are going to be
3992 * immediately released. In this case, we can then skip copying
3993 * back the contents from the GPU.
3996 if (obj->madv != I915_MADV_WILLNEED)
3999 if (obj->base.filp == NULL)
4002 /* At first glance, this looks racy, but then again so would be
4003 * userspace racing mmap against close. However, the first external
4004 * reference to the filp can only be obtained through the
4005 * i915_gem_mmap_ioctl() which safeguards us against the user
4006 * acquiring such a reference whilst we are in the middle of
4007 * freeing the object.
4009 return atomic_long_read(&obj->base.filp->f_count) == 1;
4012 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4014 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4015 struct drm_device *dev = obj->base.dev;
4016 struct drm_i915_private *dev_priv = to_i915(dev);
4017 struct i915_vma *vma, *next;
4019 intel_runtime_pm_get(dev_priv);
4021 trace_i915_gem_object_destroy(obj);
4023 /* All file-owned VMA should have been released by this point through
4024 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4025 * However, the object may also be bound into the global GTT (e.g.
4026 * older GPUs without per-process support, or for direct access through
4027 * the GTT either for the user or for scanout). Those VMA still need to be unbound now.
4030 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4031 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4032 GEM_BUG_ON(i915_vma_is_active(vma));
4033 vma->flags &= ~I915_VMA_PIN_MASK;
4034 i915_vma_close(vma);
4036 GEM_BUG_ON(obj->bind_count);
4038 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4039 * before progressing. */
4041 i915_gem_object_unpin_pages(obj);
4043 WARN_ON(obj->frontbuffer_bits);
4045 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4046 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4047 obj->tiling_mode != I915_TILING_NONE)
4048 i915_gem_object_unpin_pages(obj);
4050 if (WARN_ON(obj->pages_pin_count))
4051 obj->pages_pin_count = 0;
4052 if (discard_backing_storage(obj))
4053 obj->madv = I915_MADV_DONTNEED;
4054 i915_gem_object_put_pages(obj);
4058 if (obj->base.import_attach)
4059 drm_prime_gem_destroy(&obj->base, NULL);
4061 if (obj->ops->release)
4062 obj->ops->release(obj);
4064 drm_gem_object_release(&obj->base);
4065 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4068 i915_gem_object_free(obj);
4070 intel_runtime_pm_put(dev_priv);
4073 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4074 struct i915_address_space *vm)
4076 struct i915_vma *vma;
4077 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4078 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4085 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4086 const struct i915_ggtt_view *view)
4088 struct i915_vma *vma;
4092 list_for_each_entry(vma, &obj->vma_list, obj_link)
4093 if (i915_vma_is_ggtt(vma) &&
4094 i915_ggtt_view_equal(&vma->ggtt_view, view))
4100 i915_gem_stop_engines(struct drm_device *dev)
4102 struct drm_i915_private *dev_priv = to_i915(dev);
4103 struct intel_engine_cs *engine;
4105 for_each_engine(engine, dev_priv)
4106 dev_priv->gt.stop_engine(engine);
4110 i915_gem_suspend(struct drm_device *dev)
4112 struct drm_i915_private *dev_priv = to_i915(dev);
4115 intel_suspend_gt_powersave(dev_priv);
4117 mutex_lock(&dev->struct_mutex);
4119 /* We have to flush all the executing contexts to main memory so
4120 * that they can be saved in the hibernation image. To ensure the last
4121 * context image is coherent, we have to switch away from it. That
4122 * leaves the dev_priv->kernel_context still active when
4123 * we actually suspend, and its image in memory may not match the GPU
4124 * state. Fortunately, the kernel_context is disposable and we do
4125 * not rely on its state.
4127 ret = i915_gem_switch_to_kernel_context(dev_priv);
4131 ret = i915_gem_wait_for_idle(dev_priv);
4135 i915_gem_retire_requests(dev_priv);
4137 /* Note that rather than stopping the engines, all we have to do
4138 * is assert that every RING_HEAD == RING_TAIL (all execution complete)
4139 * and similar for all logical context images (to ensure they are
4140 * all ready for hibernation).
4142 i915_gem_stop_engines(dev);
4143 i915_gem_context_lost(dev_priv);
4144 mutex_unlock(&dev->struct_mutex);
4146 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4147 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4148 flush_delayed_work(&dev_priv->gt.idle_work);
4150 /* Assert that we successfully flushed all the work and
4151 * reset the GPU back to its idle, low power state.
4153 WARN_ON(dev_priv->gt.awake);
4158 mutex_unlock(&dev->struct_mutex);
4162 void i915_gem_resume(struct drm_device *dev)
4164 struct drm_i915_private *dev_priv = to_i915(dev);
4166 mutex_lock(&dev->struct_mutex);
4167 i915_gem_restore_gtt_mappings(dev);
4169 /* As we didn't flush the kernel context before suspend, we cannot
4170 * guarantee that the context image is complete. So let's just reset
4171 * it and start again.
4173 if (i915.enable_execlists)
4174 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4176 mutex_unlock(&dev->struct_mutex);
4179 void i915_gem_init_swizzling(struct drm_device *dev)
4181 struct drm_i915_private *dev_priv = to_i915(dev);
4183 if (INTEL_INFO(dev)->gen < 5 ||
4184 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4187 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4188 DISP_TILE_SURFACE_SWIZZLING);
4193 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4195 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4196 else if (IS_GEN7(dev))
4197 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4198 else if (IS_GEN8(dev))
4199 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4204 static void init_unused_ring(struct drm_device *dev, u32 base)
4206 struct drm_i915_private *dev_priv = to_i915(dev);
4208 I915_WRITE(RING_CTL(base), 0);
4209 I915_WRITE(RING_HEAD(base), 0);
4210 I915_WRITE(RING_TAIL(base), 0);
4211 I915_WRITE(RING_START(base), 0);
4214 static void init_unused_rings(struct drm_device *dev)
4217 init_unused_ring(dev, PRB1_BASE);
4218 init_unused_ring(dev, SRB0_BASE);
4219 init_unused_ring(dev, SRB1_BASE);
4220 init_unused_ring(dev, SRB2_BASE);
4221 init_unused_ring(dev, SRB3_BASE);
4222 } else if (IS_GEN2(dev)) {
4223 init_unused_ring(dev, SRB0_BASE);
4224 init_unused_ring(dev, SRB1_BASE);
4225 } else if (IS_GEN3(dev)) {
4226 init_unused_ring(dev, PRB1_BASE);
4227 init_unused_ring(dev, PRB2_BASE);
4232 i915_gem_init_hw(struct drm_device *dev)
4234 struct drm_i915_private *dev_priv = to_i915(dev);
4235 struct intel_engine_cs *engine;
4238 /* Double layer security blanket, see i915_gem_init() */
4239 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4241 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4242 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4244 if (IS_HASWELL(dev))
4245 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4246 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4248 if (HAS_PCH_NOP(dev)) {
4249 if (IS_IVYBRIDGE(dev)) {
4250 u32 temp = I915_READ(GEN7_MSG_CTL);
4251 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4252 I915_WRITE(GEN7_MSG_CTL, temp);
4253 } else if (INTEL_INFO(dev)->gen >= 7) {
4254 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4255 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4256 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4260 i915_gem_init_swizzling(dev);
4263 * At least 830 can leave some of the unused rings
4264 * "active" (ie. head != tail) after resume which
4265 * will prevent c3 entry. Make sure all unused rings are totally stopped.
4268 init_unused_rings(dev);
4270 BUG_ON(!dev_priv->kernel_context);
4272 ret = i915_ppgtt_init_hw(dev);
4274 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4278 /* Need to do basic initialisation of all rings first: */
4279 for_each_engine(engine, dev_priv) {
4280 ret = engine->init_hw(engine);
4285 intel_mocs_init_l3cc_table(dev);
4287 /* We can't enable contexts until all firmware is loaded */
4288 ret = intel_guc_setup(dev);
4293 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4297 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4299 if (INTEL_INFO(dev_priv)->gen < 6)
4302 /* TODO: make semaphores and Execlists play nicely together */
4303 if (i915.enable_execlists)
4309 #ifdef CONFIG_INTEL_IOMMU
4310 /* Enable semaphores on SNB when IO remapping is off */
4311 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4318 int i915_gem_init(struct drm_device *dev)
4320 struct drm_i915_private *dev_priv = to_i915(dev);
4323 mutex_lock(&dev->struct_mutex);
4325 if (!i915.enable_execlists) {
4326 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4327 dev_priv->gt.stop_engine = intel_engine_stop;
4329 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4330 dev_priv->gt.stop_engine = intel_logical_ring_stop;
4333 /* This is just a security blanket to placate dragons.
4334 * On some systems, we very sporadically observe that the first TLBs
4335 * used by the CS may be stale, despite us poking the TLB reset. If
4336 * we hold the forcewake during initialisation these problems
4337 * just magically go away.
4339 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4341 i915_gem_init_userptr(dev_priv);
4343 ret = i915_gem_init_ggtt(dev_priv);
4347 ret = i915_gem_context_init(dev);
4351 ret = intel_engines_init(dev);
4355 ret = i915_gem_init_hw(dev);
4357 /* Allow engine initialisation to fail by marking the GPU as
4358 * wedged. But we only want to do this where the GPU is angry,
4359 * for all other failure, such as an allocation failure, bail.
4361 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4362 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4367 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4368 mutex_unlock(&dev->struct_mutex);
4374 i915_gem_cleanup_engines(struct drm_device *dev)
4376 struct drm_i915_private *dev_priv = to_i915(dev);
4377 struct intel_engine_cs *engine;
4379 for_each_engine(engine, dev_priv)
4380 dev_priv->gt.cleanup_engine(engine);
4384 init_engine_lists(struct intel_engine_cs *engine)
4386 INIT_LIST_HEAD(&engine->request_list);
4390 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4392 struct drm_device *dev = &dev_priv->drm;
4394 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4395 !IS_CHERRYVIEW(dev_priv))
4396 dev_priv->num_fence_regs = 32;
4397 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4398 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4399 dev_priv->num_fence_regs = 16;
4401 dev_priv->num_fence_regs = 8;
4403 if (intel_vgpu_active(dev_priv))
4404 dev_priv->num_fence_regs =
4405 I915_READ(vgtif_reg(avail_rs.fence_num));
4407 /* Initialize fence registers to zero */
4408 i915_gem_restore_fences(dev);
4410 i915_gem_detect_bit_6_swizzle(dev);
4414 i915_gem_load_init(struct drm_device *dev)
4416 struct drm_i915_private *dev_priv = to_i915(dev);
4420 kmem_cache_create("i915_gem_object",
4421 sizeof(struct drm_i915_gem_object), 0,
4425 kmem_cache_create("i915_gem_vma",
4426 sizeof(struct i915_vma), 0,
4429 dev_priv->requests =
4430 kmem_cache_create("i915_gem_request",
4431 sizeof(struct drm_i915_gem_request), 0,
4435 INIT_LIST_HEAD(&dev_priv->context_list);
4436 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4437 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4438 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4439 for (i = 0; i < I915_NUM_ENGINES; i++)
4440 init_engine_lists(&dev_priv->engine[i]);
4441 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4442 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4443 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4444 i915_gem_retire_work_handler);
4445 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4446 i915_gem_idle_work_handler);
4447 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4448 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4450 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4452 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4454 init_waitqueue_head(&dev_priv->pending_flip_queue);
4456 dev_priv->mm.interruptible = true;
4458 mutex_init(&dev_priv->fb_tracking.lock);
4461 void i915_gem_load_cleanup(struct drm_device *dev)
4463 struct drm_i915_private *dev_priv = to_i915(dev);
4465 kmem_cache_destroy(dev_priv->requests);
4466 kmem_cache_destroy(dev_priv->vmas);
4467 kmem_cache_destroy(dev_priv->objects);
4470 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4472 struct drm_i915_gem_object *obj;
4474 /* Called just before we write the hibernation image.
4476 * We need to update the domain tracking to reflect that the CPU
4477 * will be accessing all the pages to create and restore from the
4478 * hibernation, and so upon restoration those pages will be in the CPU domain.
4481 * To make sure the hibernation image contains the latest state,
4482 * we update that state just before writing out the image.
4485 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4486 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4487 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4490 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4491 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4492 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4498 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4500 struct drm_i915_file_private *file_priv = file->driver_priv;
4501 struct drm_i915_gem_request *request;
4503 /* Clean up our request list when the client is going away, so that
4504 * later retire_requests won't dereference our soon-to-be-gone file_priv.
4507 spin_lock(&file_priv->mm.lock);
4508 list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4509 request->file_priv = NULL;
4510 spin_unlock(&file_priv->mm.lock);
4512 if (!list_empty(&file_priv->rps.link)) {
4513 spin_lock(&to_i915(dev)->rps.client_lock);
4514 list_del(&file_priv->rps.link);
4515 spin_unlock(&to_i915(dev)->rps.client_lock);
4519 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4521 struct drm_i915_file_private *file_priv;
4524 DRM_DEBUG_DRIVER("\n");
4526 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4530 file->driver_priv = file_priv;
4531 file_priv->dev_priv = to_i915(dev);
4532 file_priv->file = file;
4533 INIT_LIST_HEAD(&file_priv->rps.link);
4535 spin_lock_init(&file_priv->mm.lock);
4536 INIT_LIST_HEAD(&file_priv->mm.request_list);
4538 file_priv->bsd_engine = -1;
4540 ret = i915_gem_context_open(dev, file);
4548 * i915_gem_track_fb - update frontbuffer tracking
4549 * @old: current GEM buffer for the frontbuffer slots
4550 * @new: new GEM buffer for the frontbuffer slots
4551 * @frontbuffer_bits: bitmask of frontbuffer slots
4553 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4554 * from @old and setting them in @new. Both @old and @new can be NULL.
4556 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4557 struct drm_i915_gem_object *new,
4558 unsigned frontbuffer_bits)
4561 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
4562 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
4563 old->frontbuffer_bits &= ~frontbuffer_bits;
4567 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
4568 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
4569 new->frontbuffer_bits |= frontbuffer_bits;
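/*
 * Illustrative usage (a sketch only): a plane update that swaps the scanout
 * object calls
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj, frontbuffer_bits);
 *
 * so the bits migrate from the outgoing to the incoming object; passing
 * NULL for either side covers enabling or disabling the plane.
 */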
4573 /* All the new VM stuff */
4574 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
4575 struct i915_address_space *vm)
4577 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
4578 struct i915_vma *vma;
4580 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
4582 list_for_each_entry(vma, &o->vma_list, obj_link) {
4583 if (i915_vma_is_ggtt(vma) &&
4584 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4587 return vma->node.start;
4590 WARN(1, "%s vma for this object not found.\n",
4591 i915_is_ggtt(vm) ? "global" : "ppgtt");
4595 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
4596 const struct i915_ggtt_view *view)
4598 struct i915_vma *vma;
4600 list_for_each_entry(vma, &o->vma_list, obj_link)
4601 if (i915_vma_is_ggtt(vma) &&
4602 i915_ggtt_view_equal(&vma->ggtt_view, view))
4603 return vma->node.start;
4605 WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
4609 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4610 struct i915_address_space *vm)
4612 struct i915_vma *vma;
4614 list_for_each_entry(vma, &o->vma_list, obj_link) {
4615 if (i915_vma_is_ggtt(vma) &&
4616 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4618 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4625 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
4626 const struct i915_ggtt_view *view)
4628 struct i915_vma *vma;
4630 list_for_each_entry(vma, &o->vma_list, obj_link)
4631 if (i915_vma_is_ggtt(vma) &&
4632 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
4633 drm_mm_node_allocated(&vma->node))
4639 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
4641 struct i915_vma *vma;
4643 GEM_BUG_ON(list_empty(&o->vma_list));
4645 list_for_each_entry(vma, &o->vma_list, obj_link) {
4646 if (i915_vma_is_ggtt(vma) &&
4647 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
4648 return vma->node.size;
4654 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
4656 struct i915_vma *vma;
4657 list_for_each_entry(vma, &obj->vma_list, obj_link)
4658 if (i915_vma_is_pinned(vma))
4664 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4666 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4670 /* Only default objects have per-page dirty tracking */
4671 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
4674 page = i915_gem_object_get_page(obj, n);
4675 set_page_dirty(page);
4679 /* Allocate a new GEM object and fill it with the supplied data */
4680 struct drm_i915_gem_object *
4681 i915_gem_object_create_from_data(struct drm_device *dev,
4682 const void *data, size_t size)
4684 struct drm_i915_gem_object *obj;
4685 struct sg_table *sg;
4689 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4693 ret = i915_gem_object_set_to_cpu_domain(obj, true);
4697 ret = i915_gem_object_get_pages(obj);
4701 i915_gem_object_pin_pages(obj);
4703 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4704 obj->dirty = 1; /* Backing store is now out of date */
4705 i915_gem_object_unpin_pages(obj);
4707 if (WARN_ON(bytes != size)) {
4708 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
4716 i915_gem_object_put(obj);
4717 return ERR_PTR(ret);