/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
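/*
 * Note on locking (annotation, not from the original file): the shrinker can
 * be entered from direct reclaim while this very task already holds
 * dev->struct_mutex (e.g. while allocating backing pages for an object).
 * shrinker_lock() below therefore uses a recursive trylock:
 * MUTEX_TRYLOCK_RECURSIVE means we already own the mutex, so we proceed
 * without taking it again and, via *unlock, tell shrinker_unlock() not to
 * drop it on the way out.
 */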
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}
static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* Only GGTT vmas may be permanently pinned, and they are
		 * always at the start of the list. We can stop hunting as
		 * soon as we see a ppGTT vma.
		 */
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_pinned(vma))
			return true;
	}

	return false;
}
static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
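/*
 * unsafe_drop_pages() below is "unsafe" in that it checks obj->mm.pages
 * without holding obj->mm.lock; callers must re-check under obj->mm.lock
 * (as i915_gem_shrink() does) before treating the pages as released.
 */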
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
}
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_link))) {
			list_move_tail(&obj->global_link, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_link);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
		}
		list_splice_tail(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	return count;
}
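/*
 * Illustrative use of the interface above (an example, not an actual call
 * site; nr_pages is a hypothetical caller variable): to reclaim only
 * purgeable backing storage, much like the first pass of the shrinker scan
 * below, one would issue something like:
 *
 *	freed = i915_gem_shrink(dev_priv, nr_pages,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 */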
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}
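/*
 * Core mm shrinker interface: ->count_objects() below reports an estimate of
 * how many pages we could free, and ->scan_objects() is then invoked to
 * actually free up to sc->nr_to_scan of them.
 */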
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	return count;
}
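/*
 * The scan pass escalates in cost: first purgeable objects only, then any
 * shrinkable bound/unbound object, and finally (for kswapd only) the device
 * is woken so that even active objects may be reclaimed.
 */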
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (freed < sc->nr_to_scan && current_is_kswapd()) {
		intel_runtime_pm_get(dev_priv);
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
		intel_runtime_pm_put(dev_priv);
	}

	shrinker_unlock(dev_priv, unlock);

	return freed;
}
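/*
 * The oom and vmap notifiers below cannot simply back off like the normal
 * shrinker, so they keep retrying to idle the GPU and take struct_mutex for
 * up to timeout_ms before giving up.
 */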
static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    shrinker_lock(dev_priv, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;
	bool unlock;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
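/*
 * The vmap purge notifier is run when the core mm needs to free up vmap
 * address space. Dropping vmapped objects (I915_SHRINK_VMAPS) and unbinding
 * cached GGTT iomaps releases their mappings.
 */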
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	shrinker_unlock(dev_priv, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}