drivers/gpu/drm/i915/i915_gem_clflush.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"

static DEFINE_SPINLOCK(clflush_lock);

struct clflush {
        struct dma_fence dma; /* Must be first for dma_fence_free() */
        struct i915_sw_fence wait;
        struct work_struct work;
        struct drm_i915_gem_object *obj;
};
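
/*
 * Lifecycle of the asynchronous flush, as implemented below: the clflush
 * is wrapped in a dma_fence so it can be ordered against GPU activity.
 * An i915_sw_fence (wait) is armed against every fence already in the
 * object's reservation object; once those complete, a worker performs
 * the actual cache flush and signals clflush->dma, which was installed
 * as the new exclusive fence on the object.
 */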

static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
        return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
        return "clflush";
}

static bool i915_clflush_enable_signaling(struct dma_fence *fence)
{
        return true;
}

static void i915_clflush_release(struct dma_fence *fence)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

        i915_sw_fence_fini(&clflush->wait);

        /* dma is the first member, so dma_fence_free() frees the whole struct */
        BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
        dma_fence_free(&clflush->dma);
}

static const struct dma_fence_ops i915_clflush_ops = {
        .get_driver_name = i915_clflush_get_driver_name,
        .get_timeline_name = i915_clflush_get_timeline_name,
        .enable_signaling = i915_clflush_enable_signaling,
        .wait = dma_fence_default_wait,
        .release = i915_clflush_release,
};

static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
        drm_clflush_sg(obj->mm.pages);
        intel_fb_obj_flush(obj, ORIGIN_CPU);
}

static void i915_clflush_work(struct work_struct *work)
{
        struct clflush *clflush = container_of(work, typeof(*clflush), work);
        struct drm_i915_gem_object *obj = clflush->obj;

        if (i915_gem_object_pin_pages(obj)) {
                DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
                goto out;
        }

        __i915_do_clflush(obj);

        i915_gem_object_unpin_pages(obj);

out:
        i915_gem_object_put(obj);

        /* Signal even on error so that waiters are not left stuck behind us */
        dma_fence_signal(&clflush->dma);
        dma_fence_put(&clflush->dma);
}

static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
                    enum i915_sw_fence_notify state)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

        switch (state) {
        case FENCE_COMPLETE: /* all awaited fences are done, flush now */
                schedule_work(&clflush->work);
                break;

        case FENCE_FREE: /* drop the reference taken for the sw_fence */
                dma_fence_put(&clflush->dma);
                break;
        }

        return NOTIFY_DONE;
}
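
/**
 * i915_gem_clflush_object - flush the CPU cache of an object's backing pages
 * @obj: the GEM object to flush
 * @flags: I915_CLFLUSH_FORCE to flush even an apparently coherent object,
 *         I915_CLFLUSH_SYNC to flush immediately rather than deferring the
 *         flush behind the fences in the object's reservation object
 *
 * Returns false if the object is coherent and so needs no flush;
 * otherwise clears obj->cache_dirty and returns true.
 */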
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        struct clflush *clflush;

        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         * Similarly, we only access struct pages through the CPU cache, so
         * anything not backed by physical memory we consider to be always
         * coherent and in no need of clflushing.
         */
        if (!i915_gem_object_has_struct_page(obj)) {
                obj->cache_dirty = false;
                return false;
        }

        /*
         * If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines.  However,
         * the caches are only snooped when the render cache is
         * flushed/invalidated.  As we always have to emit invalidations
         * and flushes when moving into and out of the RENDER domain, correct
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
        if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
                return false;

        trace_i915_gem_object_clflush(obj);

        clflush = NULL;
        if (!(flags & I915_CLFLUSH_SYNC))
                clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (clflush) {
                GEM_BUG_ON(!obj->cache_dirty);

                dma_fence_init(&clflush->dma,
                               &i915_clflush_ops,
                               &clflush_lock,
                               to_i915(obj->base.dev)->mm.unordered_timeline,
                               0);
                i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

                clflush->obj = i915_gem_object_get(obj);
                INIT_WORK(&clflush->work, i915_clflush_work);

                /* Held for the sw_fence; dropped again in FENCE_FREE */
                dma_fence_get(&clflush->dma);

                i915_sw_fence_await_reservation(&clflush->wait,
                                                obj->resv, NULL,
                                                true, I915_FENCE_TIMEOUT,
                                                GFP_KERNEL);

                reservation_object_lock(obj->resv, NULL);
                reservation_object_add_excl_fence(obj->resv, &clflush->dma);
                reservation_object_unlock(obj->resv);

                i915_sw_fence_commit(&clflush->wait);
        } else if (obj->mm.pages) {
                __i915_do_clflush(obj);
        } else {
                GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
        }

        obj->cache_dirty = false;
        return true;
}
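
/*
 * Illustrative sketch (not part of this file): a caller that needs the CPU
 * to observe coherent contents straight away could request a synchronous
 * flush.  The wrapper below is hypothetical; only i915_gem_clflush_object()
 * and the I915_CLFLUSH_* flags come from the code above.
 */
#if 0
static void example_flush_for_cpu_access(struct drm_i915_gem_object *obj)
{
        /* Flush stale cachelines now instead of queueing a worker */
        if (!i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC))
                return; /* already coherent, nothing was flushed */
}
#endif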