/* drivers/staging/gma500/psb_sgx.c — Intel GMA500 staging driver */
1 /**************************************************************************
2  * Copyright (c) 2007, Intel Corporation.
3  * All Rights Reserved.
4  * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
5  * All Rights Reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program; if not, write to the Free Software Foundation, Inc.,
18  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  **************************************************************************/
21
22 #include <drm/drmP.h>
23 #include "psb_drv.h"
24 #include "psb_drm.h"
25 #include "psb_reg.h"
26 #include "ttm/ttm_bo_api.h"
27 #include "ttm/ttm_execbuf_util.h"
28 #include "psb_ttm_userobj_api.h"
29 #include "ttm/ttm_placement.h"
30 #include "psb_sgx.h"
31 #include "psb_intel_reg.h"
32 #include "psb_powermgmt.h"
33
34
35 static inline int psb_same_page(unsigned long offset,
36                                 unsigned long offset2)
37 {
38         return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
39 }
40
41 static inline unsigned long psb_offset_end(unsigned long offset,
42                                               unsigned long end)
43 {
44         offset = (offset + PAGE_SIZE) & PAGE_MASK;
45         return (end < offset) ? end : offset;
46 }
47
/*
 * Cached kmap state for a destination buffer object, so that repeated
 * writes into the same page can reuse one mapping instead of remapping
 * for every access.
 */
struct psb_dstbuf_cache {
        unsigned int dst;                  /* identifier of the cached destination buffer — presumably an index; TODO confirm */
        struct ttm_buffer_object *dst_buf; /* TTM buffer object currently cached */
        unsigned long dst_offset;          /* byte offset of the cached mapping within dst_buf */
        uint32_t *dst_page;                /* kernel virtual address of the mapped page */
        unsigned int dst_page_offset;      /* offset into dst_page — units (bytes vs 32-bit words) not shown here; TODO confirm */
        struct ttm_bo_kmap_obj dst_kmap;   /* kmap handle needed to release the mapping */
        bool dst_is_iomem;                 /* true if the mapping is I/O memory rather than system RAM */
};
57
/*
 * Per-buffer bookkeeping used while validating a buffer object for
 * command submission; wraps a TTM validation-list entry with the
 * driver-specific request and result state.
 */
struct psb_validate_buffer {
        struct ttm_validate_buffer base;   /* TTM validation list entry (list head, bo, sync arg) */
        struct psb_validate_req req;       /* validation request parameters */
        int ret;                           /* per-buffer validation result code */
        struct psb_validate_arg __user *user_val_arg; /* user-space arg to report back to; NULL for kernel-internal buffers */
        uint32_t flags;                    /* bo->mem.placement recorded after validation */
        uint32_t offset;                   /* bo->offset recorded after validation */
        int po_correct;                    /* presumably: user-supplied presumed offset was correct; TODO confirm */
};
/*
 * psb_placement_fence_type - check placement and compute the fence type
 *
 * Verifies that @set_val_flags requests at least one GPU access
 * direction (read and/or write), checks the buffer against an empty
 * placement request, and computes the fence type to be used with
 * @new_fence_class.  If the buffer already carries a fence of a
 * different class, or one whose type is not covered by the new type,
 * the buffer is idled with ttm_bo_wait() first.
 *
 * @bo: buffer object to be fenced.
 * @set_val_flags: GPU access flags to set; must include
 *      PSB_GPU_ACCESS_READ and/or PSB_GPU_ACCESS_WRITE.
 * @clr_val_flags: GPU access flags to clear; currently unused (see the
 *      commented-out proposed_flags update at the bottom).
 * @new_fence_class: fence class the buffer will be fenced with.
 * @new_fence_type: out parameter receiving the computed fence type.
 *
 * Returns 0 on success, -EINVAL when no access type is indicated, or a
 * negative error from psb_ttm_bo_check_placement() / ttm_bo_wait().
 */
static int
psb_placement_fence_type(struct ttm_buffer_object *bo,
                         uint64_t set_val_flags,
                         uint64_t clr_val_flags,
                         uint32_t new_fence_class,
                         uint32_t *new_fence_type)
{
        int ret;
        uint32_t n_fence_type;
        /*
        uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
        uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
        */
        struct ttm_fence_object *old_fence;
        uint32_t old_fence_type;
        struct ttm_placement placement;

        /* The caller must request at least one GPU access direction. */
        if (unlikely
            (!(set_val_flags &
               (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
                DRM_ERROR
                    ("GPU access type (read / write) is not indicated.\n");
                return -EINVAL;
        }

        /* User space driver doesn't set any TTM placement flags in
                                        set_val_flags or clr_val_flags */
        placement.num_placement = 0;/* FIXME  */
        placement.num_busy_placement = 0;
        placement.fpfn = 0;
        placement.lpfn = 0;
        ret = psb_ttm_bo_check_placement(bo, &placement);
        if (unlikely(ret != 0))
                return ret;

        /* Only the execution fence type is handled here. */
        switch (new_fence_class) {
        default:
                n_fence_type = _PSB_FENCE_TYPE_EXE;
        }

        *new_fence_type = n_fence_type;
        old_fence = (struct ttm_fence_object *) bo->sync_obj;
        old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;

        /*
         * An existing fence of a different class, or one whose type is
         * not a superset of the new type, cannot simply be replaced:
         * wait for the buffer to idle first.
         */
        if (old_fence && ((new_fence_class != old_fence->fence_class) ||
                          ((n_fence_type ^ old_fence_type) &
                           old_fence_type))) {
                ret = ttm_bo_wait(bo, 0, 1, 0);
                if (unlikely(ret != 0))
                        return ret;
        }
        /*
        bo->proposed_flags = (bo->proposed_flags | set_flags)
                & ~clr_flags & TTM_PL_MASK_MEMTYPE;
        */
        return 0;
}
124
125 int psb_validate_kernel_buffer(struct psb_context *context,
126                                struct ttm_buffer_object *bo,
127                                uint32_t fence_class,
128                                uint64_t set_flags, uint64_t clr_flags)
129 {
130         struct psb_validate_buffer *item;
131         uint32_t cur_fence_type;
132         int ret;
133
134         if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
135                 DRM_ERROR("Out of free validation buffer entries for "
136                           "kernel buffer validation.\n");
137                 return -ENOMEM;
138         }
139
140         item = &context->buffers[context->used_buffers];
141         item->user_val_arg = NULL;
142         item->base.reserved = 0;
143
144         ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
145         if (unlikely(ret != 0))
146                 return ret;
147
148         ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
149                                        &cur_fence_type);
150         if (unlikely(ret != 0)) {
151                 ttm_bo_unreserve(bo);
152                 return ret;
153         }
154
155         item->base.bo = ttm_bo_reference(bo);
156         item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
157         item->base.reserved = 1;
158
159         /* Internal locking ??? FIXMEAC */
160         list_add_tail(&item->base.head, &context->kern_validate_list);
161         context->used_buffers++;
162         /*
163         ret = ttm_bo_validate(bo, 1, 0, 0);
164         if (unlikely(ret != 0))
165                 goto out_unlock;
166         */
167         item->offset = bo->offset;
168         item->flags = bo->mem.placement;
169         context->fence_types |= cur_fence_type;
170
171         return ret;
172 }
173
/*
 * psb_fence_or_sync - fence a list of buffer objects, backing off on
 * failure.
 *
 * Creates a user-visible fence object covering @fence_types on
 * @engine and attaches it to every buffer on @list.  If fence creation
 * fails, the buffers' reservations are backed off and the failure is
 * reported to user space via @fence_arg (unless suppressed).
 *
 * @file_priv: DRM file the fence user object is created under.
 * @engine: fence class / engine the fence is emitted on.
 * @fence_types: fence type mask the fence must cover.
 * @fence_flags: DRM_PSB_FENCE_NO_USER suppresses user-space reporting
 *      (and drops the user object reference instead).
 * @list: list of reserved ttm_validate_buffer entries to fence.
 * @fence_arg: user-space fence report; only written when
 *      DRM_PSB_FENCE_NO_USER is clear.
 * @fence_p: if non-NULL, receives the fence reference (NULL on
 *      failure); otherwise the local reference is dropped here.
 */
void psb_fence_or_sync(struct drm_file *file_priv,
                       uint32_t engine,
                       uint32_t fence_types,
                       uint32_t fence_flags,
                       struct list_head *list,
                       struct psb_ttm_fence_rep *fence_arg,
                       struct ttm_fence_object **fence_p)
{
        struct drm_device *dev = file_priv->minor->dev;
        struct drm_psb_private *dev_priv = psb_priv(dev);
        struct ttm_fence_device *fdev = &dev_priv->fdev;
        int ret;
        struct ttm_fence_object *fence;
        struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
        uint32_t handle;

        ret = ttm_fence_user_create(fdev, tfile,
                                    engine, fence_types,
                                    TTM_FENCE_FLAG_EMIT, &fence, &handle);
        if (ret) {

                /*
                 * Fence creation failed.
                 * Fall back to synchronous operation and idle the engine.
                 */

                if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {

                        /*
                         * Communicate to user-space that
                         * fence creation has failed and that
                         * the engine is idle.
                         */

                        fence_arg->handle = ~0;
                        fence_arg->error = ret;
                }

                /* Release the reservations taken during validation. */
                ttm_eu_backoff_reservation(list);
                if (fence_p)
                        *fence_p = NULL;
                return;
        }

        /* Attach the new fence to every buffer on the list. */
        ttm_eu_fence_buffer_objects(list, fence);
        if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
                /* Report the created fence back to user space. */
                struct ttm_fence_info info = ttm_fence_get_info(fence);
                fence_arg->handle = handle;
                fence_arg->fence_class = ttm_fence_class(fence);
                fence_arg->fence_type = ttm_fence_types(fence);
                fence_arg->signaled_types = info.signaled_types;
                fence_arg->error = 0;
        } else {
                /*
                 * User space does not want the fence: drop the user
                 * object reference created above.
                 */
                ret =
                    ttm_ref_object_base_unref(tfile, handle,
                                              ttm_fence_type);
                BUG_ON(ret);
        }

        /* Hand the fence reference to the caller, or drop it. */
        if (fence_p)
                *fence_p = fence;
        else if (fence)
                ttm_fence_object_unref(&fence);
}
238