/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>

#include <drm/drm_mm.h>

#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_request.h"

enum i915_cache_level;
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct drm_i915_fence_reg *fence;
	struct sg_table *pages;
	void __iomem *iomap;
	u64 size;
	u64 display_alignment;
	unsigned int flags;
	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits.
	 */
#define I915_VMA_PIN_MASK 0xf
#define I915_VMA_PIN_OVERFLOW	BIT(5)
	/** Flags and address space this VMA is bound to */
#define I915_VMA_GLOBAL_BIND	BIT(6)
#define I915_VMA_LOCAL_BIND	BIT(7)
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)

#define I915_VMA_GGTT		BIT(8)
#define I915_VMA_CAN_FENCE	BIT(9)
#define I915_VMA_CLOSED		BIT(10)
	unsigned int active;
	struct i915_gem_active last_read[I915_NUM_ENGINES];
	struct i915_gem_active last_fence;
	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
	 * functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;
	/** This object's place on the active/inactive lists */
	struct list_head vm_link;

	struct list_head obj_link; /* Link in the object's VMA list */
	struct rb_node obj_node;
	/** This vma's place in the execbuf reservation list */
	struct list_head exec_link;

	/** This vma's place in the eviction list */
	struct list_head evict_link;
	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
};
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view);

void i915_vma_unpin_and_release(struct i915_vma **p_vma);
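/*
 * Illustrative sketch (not part of the original interface): obtaining the vma
 * that maps an object into a given address space with the default view, as a
 * caller of i915_vma_instance() might. The helper name is hypothetical.
 */
static inline struct i915_vma *
example_lookup_normal_vma(struct drm_i915_gem_object *obj,
			  struct i915_address_space *vm)
{
	/* A NULL view selects I915_GGTT_VIEW_NORMAL; the same (obj, vm, view)
	 * triplet always resolves to the same vma, so this may be called
	 * repeatedly without creating duplicates. May return an ERR_PTR. */
	return i915_vma_instance(obj, vm, NULL);
}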
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_GGTT;
}

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_CAN_FENCE;
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_CLOSED;
}
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
	return vma->active;
}

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return i915_vma_get_active(vma);
}

static inline void i915_vma_set_active(struct i915_vma *vma,
				       unsigned int engine)
{
	vma->active |= BIT(engine);
}

static inline void i915_vma_clear_active(struct i915_vma *vma,
					 unsigned int engine)
{
	vma->active &= ~BIT(engine);
}

static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
					      unsigned int engine)
{
	return vma->active & BIT(engine);
}
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->node.allocated);
	GEM_BUG_ON(upper_32_bits(vma->node.start));
	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
	return lower_32_bits(vma->node.start);
}
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
	return vma;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
	i915_gem_object_put(vma->obj);
}
static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
{
	return a - b;
}
static inline long
i915_vma_compare(struct i915_vma *vma,
		 struct i915_address_space *vm,
		 const struct i915_ggtt_view *view)
{
	ptrdiff_t cmp;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));

	cmp = ptrdiff(vma->vm, vm);
	if (cmp)
		return cmp;

	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
	cmp = vma->ggtt_view.type;
	if (!view)
		return cmp;

	cmp -= view->type;
	if (cmp)
		return cmp;

	/* ggtt_view.type also encodes its size so that we both distinguish
	 * between different views using it as a "type" and also use a compact
	 * (no accessing of uninitialised padding bytes) memcmp without storing
	 * an extra parameter or adding more code.
	 *
	 * To ensure that the memcmp is valid for all branches of the union,
	 * even though the code looks like it is just comparing one branch,
	 * we assert above that all branches have the same address, and that
	 * each branch has a unique type/size.
	 */
	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
	BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), partial));
	return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
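/*
 * Illustrative sketch (not part of the original interface): using
 * i915_vma_compare() to find an existing vma that already matches a requested
 * (vm, view) placement. The helper name is hypothetical and the vma_list
 * member of struct drm_i915_gem_object is an assumption for the example.
 */
static inline struct i915_vma *
example_find_matching_vma(struct drm_i915_gem_object *obj,
			  struct i915_address_space *vm,
			  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	/* i915_vma_compare() returns 0 only when both vm and view match. */
	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (!i915_vma_compare(vma, vm, view))
			return vma;

	return NULL;
}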
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags);

static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	/* Pin early to prevent the shrinker/eviction logic from destroying
	 * our vma as we insert and bind.
	 */
	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
		return 0;
	}

	return __i915_vma_do_pin(vma, size, alignment, flags);
}
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}
static inline void __i915_vma_pin(struct i915_vma *vma)
{
	vma->flags++;
	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	vma->flags--;
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}
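/*
 * Illustrative sketch (not part of the original interface): a minimal
 * pin/use/unpin cycle against the global GTT. The helper name and the choice
 * of PIN_GLOBAL are assumptions for the example; real callers choose size,
 * alignment and flags to match their use.
 */
static inline int example_use_vma_in_ggtt(struct i915_vma *vma)
{
	int ret;

	/* Reserve space and bind the backing pages; on success the vma is
	 * pinned and vma->node.start is a valid GPU address. */
	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (ret)
		return ret;

	/* ... program hardware with i915_ggtt_offset(vma) ... */

	/* Drop the pin once the GPU address is no longer needed. */
	i915_vma_unpin(vma);
	return 0;
}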
/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the return iomapping,
 * the caller must call i915_vma_unpin_iomap to relinquish the pinning
 * after the iomapping is no longer required.
 *
 * Callers must hold the struct_mutex.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
 * @vma: VMA to unpin
 *
 * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
 *
 * Callers must hold the struct_mutex. This function is only valid to be
 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
 */
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
	GEM_BUG_ON(vma->iomap == NULL);
	i915_vma_unpin(vma);
}
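/*
 * Illustrative sketch (not part of the original interface): CPU access to a
 * vma through the mappable aperture. The helper name is hypothetical; the
 * PIN_GLOBAL | PIN_MAPPABLE flags reflect the requirement stated above that
 * the vma be pinned in the global GTT mappable region first.
 */
static inline int example_poke_through_aperture(struct i915_vma *vma)
{
	void __iomem *map;
	int ret;

	/* Pin into the mappable region of the global GTT. */
	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
	if (ret)
		return ret;

	/* Takes an extra pin that is dropped by i915_vma_unpin_iomap(). */
	map = i915_vma_pin_iomap(vma);
	if (IS_ERR(map)) {
		i915_vma_unpin(vma);
		return PTR_ERR(map);
	}

	iowrite32(0, map);		/* example write-combined CPU access */

	i915_vma_unpin_iomap(vma);	/* release the iomap pin */
	i915_vma_unpin(vma);		/* release our own pin */
	return 0;
}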
static inline struct page *i915_vma_first_page(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	return sg_page(vma->pages->sgl);
}
/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * status must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * True if the vma has a fence, false otherwise.
 */
static inline bool
i915_vma_pin_fence(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
	if (vma->fence) {
		vma->fence->pin_count++;
		return true;
	} else
		return false;
}
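/*
 * Illustrative sketch (not part of the original interface): taking a fence
 * pin around scanout setup. The helper name is hypothetical; fence state is
 * assumed to have been synchronized already via i915_vma_get_fence(), as the
 * comment above requires.
 */
static inline bool example_pin_scanout_fence(struct i915_vma *vma)
{
	/* Returns true only when a fence register is attached to the vma. */
	if (!i915_vma_pin_fence(vma))
		return false;	/* untiled: no fence register to program */

	/* ... program the fenced (tiled) scanout path ... */
	/* Pair with i915_vma_unpin_fence() (below) once scanout is torn down. */
	return true;
}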
/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence. It will handle both objects with and without an
 * attached fence correctly, callers do not need to distinguish this.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
	if (vma->fence) {
		GEM_BUG_ON(vma->fence->pin_count <= 0);
		vma->fence->pin_count--;
	}
}

#endif /* __I915_VMA_H__ */