/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

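/*
 * Order (log2 of the bucket count) of the hash table used for resource
 * and buffer-object lookup during command-stream validation.
 */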
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset into the command buffer, in units of 4-byte entries, of
 * the id that needs fixup.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for
 * the resource.
 * @no_buffer_needed: @switching_backup is true on the first buffer
 * reference, so resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back validating the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled only if guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

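/*
 * Initialize an entry in the command verifier table, indexed by command id
 * relative to SVGA_3D_CMD_BASE.
 */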
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);


/**
 * vmw_resources_unreserve - Unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it points to, to the
 * validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view, and add it and the surface it
 * points to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: View id of the view.
 *
 * The view is identified by its view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * returns -EINVAL. Otherwise it returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list. */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_dma_buffer *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        list_for_each_entry(rel, list, head) {
                if (likely(rel->res != NULL))
                        cb[rel->offset] = rel->res->id;
                else
                        cb[rel->offset] = SVGA_3D_CMD_NOP;
        }
}

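/*
 * Generic handler for commands that are not allowed from user-space;
 * rejects the command. Note that for CAP_SYS_ADMIN callers the (still
 * nonzero) return value is 1 rather than a negative errno.
 */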
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

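/* Generic handler for commands that need no further checking. */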
static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_dma_buffer *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        if (p_val)
                *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          id_loc - sw_context->buf_start);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource.
         */
        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         id_loc - sw_context->buf_start);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_dma_buffer *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

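/*
 * Validate a SetRenderTarget command: check the render target type, the
 * context and the target surface, and track the render target binding when
 * guest-backed objects are available.
 */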
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

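/*
 * Validate a SurfaceCopy command: check that both the source and the
 * destination surfaces are accessible to the caller.
 */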
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

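/*
 * Validate a DX buffer-copy command: check both the source and the
 * destination buffer surfaces.
 */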
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

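/*
 * Validate a DX predicated copy-region command: check both the source and
 * the destination surfaces.
 */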
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

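/*
 * Validate a SurfaceStretchBlt command: check both the source and the
 * destination surfaces.
 */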
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

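/*
 * Validate a BlitSurfaceToScreen command: only the source surface image
 * needs checking.
 */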
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

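/* Validate a Present command: check the surface to be presented. */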
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */
        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */
                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_dmabuf_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

1270
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_dx_define_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineQuery q;
        } *cmd;
        int ret;
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *cotable_res;

        if (ctx_node == NULL) {
                DRM_ERROR("DX Context not set for query.\n");
                return -EINVAL;
        }

        cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

        if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;

        cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
        vmw_resource_unreference(&cotable_res);

        return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB.  In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
1388 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1389                                  struct vmw_sw_context *sw_context,
1390                                  SVGA3dCmdHeader *header)
1391 {
1392         struct vmw_dx_bind_query_cmd {
1393                 SVGA3dCmdHeader header;
1394                 SVGA3dCmdDXBindQuery q;
1395         } *cmd;
1396
1397         struct vmw_dma_buffer *vmw_bo;
1398         int    ret;
1399
        if (unlikely(sw_context->dx_ctx_node == NULL)) {
                DRM_ERROR("DX Context not set for query.\n");
                return -EINVAL;
        }

1401         cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1402
1403         /*
1404          * Look up the buffer pointed to by q.mobid, put it on the relocation
1405          * list so its kernel mode MOB ID can be filled in later
1406          */
1407         ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1408                                     &vmw_bo);
1409
1410         if (ret != 0)
1411                 return ret;
1412
1413         sw_context->dx_query_mob = vmw_bo;
1414         sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1415
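        /*
         * The validation list entry set up by vmw_translate_mob_ptr()
         * holds its own reference for the duration of the submission,
         * so the local reference can be dropped here.
         */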
1416         vmw_dmabuf_unreference(&vmw_bo);
1417
1418         return ret;
1419 }
1420
1423 /**
1424  * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
1425  *
1426  * @dev_priv: Pointer to a device private struct.
1427  * @sw_context: The software context used for this command submission.
1428  * @header: Pointer to the command header in the command stream.
1429  */
1430 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1431                                   struct vmw_sw_context *sw_context,
1432                                   SVGA3dCmdHeader *header)
1433 {
1434         struct vmw_begin_gb_query_cmd {
1435                 SVGA3dCmdHeader header;
1436                 SVGA3dCmdBeginGBQuery q;
1437         } *cmd;
1438
1439         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1440                            header);
1441
1442         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1443                                  user_context_converter, &cmd->q.cid,
1444                                  NULL);
1445 }
1446
1447 /**
1448  * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
1449  *
1450  * @dev_priv: Pointer to a device private struct.
1451  * @sw_context: The software context used for this command submission.
1452  * @header: Pointer to the command header in the command stream.
1453  */
1454 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1455                                struct vmw_sw_context *sw_context,
1456                                SVGA3dCmdHeader *header)
1457 {
1458         struct vmw_begin_query_cmd {
1459                 SVGA3dCmdHeader header;
1460                 SVGA3dCmdBeginQuery q;
1461         } *cmd;
1462
1463         cmd = container_of(header, struct vmw_begin_query_cmd,
1464                            header);
1465
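        /*
         * On guest-backed devices, rewrite the legacy query command in
         * place into its guest-backed equivalent. The BUG_ON below
         * documents the assumption that both command layouts occupy
         * exactly the same number of bytes, so the rewrite cannot
         * overrun the command buffer.
         */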
1466         if (dev_priv->has_mob) {
1467                 struct {
1468                         SVGA3dCmdHeader header;
1469                         SVGA3dCmdBeginGBQuery q;
1470                 } gb_cmd;
1471
1472                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1473
1474                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1475                 gb_cmd.header.size = cmd->header.size;
1476                 gb_cmd.q.cid = cmd->q.cid;
1477                 gb_cmd.q.type = cmd->q.type;
1478
1479                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1480                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1481         }
1482
1483         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1484                                  user_context_converter, &cmd->q.cid,
1485                                  NULL);
1486 }
1487
1488 /**
1489  * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1490  *
1491  * @dev_priv: Pointer to a device private struct.
1492  * @sw_context: The software context used for this command submission.
1493  * @header: Pointer to the command header in the command stream.
1494  */
1495 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1496                                 struct vmw_sw_context *sw_context,
1497                                 SVGA3dCmdHeader *header)
1498 {
1499         struct vmw_dma_buffer *vmw_bo;
1500         struct vmw_query_cmd {
1501                 SVGA3dCmdHeader header;
1502                 SVGA3dCmdEndGBQuery q;
1503         } *cmd;
1504         int ret;
1505
1506         cmd = container_of(header, struct vmw_query_cmd, header);
1507         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1508         if (unlikely(ret != 0))
1509                 return ret;
1510
1511         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1512                                     &cmd->q.mobid,
1513                                     &vmw_bo);
1514         if (unlikely(ret != 0))
1515                 return ret;
1516
1517         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1518
1519         vmw_dmabuf_unreference(&vmw_bo);
1520         return ret;
1521 }
1522
1523 /**
1524  * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1525  *
1526  * @dev_priv: Pointer to a device private struct.
1527  * @sw_context: The software context used for this command submission.
1528  * @header: Pointer to the command header in the command stream.
1529  */
1530 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1531                              struct vmw_sw_context *sw_context,
1532                              SVGA3dCmdHeader *header)
1533 {
1534         struct vmw_dma_buffer *vmw_bo;
1535         struct vmw_query_cmd {
1536                 SVGA3dCmdHeader header;
1537                 SVGA3dCmdEndQuery q;
1538         } *cmd;
1539         int ret;
1540
1541         cmd = container_of(header, struct vmw_query_cmd, header);
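        /*
         * Rewritten in place to the guest-backed form, as in
         * vmw_cmd_begin_query().
         */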
1542         if (dev_priv->has_mob) {
1543                 struct {
1544                         SVGA3dCmdHeader header;
1545                         SVGA3dCmdEndGBQuery q;
1546                 } gb_cmd;
1547
1548                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1549
1550                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1551                 gb_cmd.header.size = cmd->header.size;
1552                 gb_cmd.q.cid = cmd->q.cid;
1553                 gb_cmd.q.type = cmd->q.type;
1554                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1555                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1556
1557                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1558                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1559         }
1560
1561         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1562         if (unlikely(ret != 0))
1563                 return ret;
1564
1565         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1566                                       &cmd->q.guestResult,
1567                                       &vmw_bo);
1568         if (unlikely(ret != 0))
1569                 return ret;
1570
1571         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1572
1573         vmw_dmabuf_unreference(&vmw_bo);
1574         return ret;
1575 }
1576
1577 /**
1578  * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1579  *
1580  * @dev_priv: Pointer to a device private struct.
1581  * @sw_context: The software context used for this command submission.
1582  * @header: Pointer to the command header in the command stream.
1583  */
1584 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1585                                  struct vmw_sw_context *sw_context,
1586                                  SVGA3dCmdHeader *header)
1587 {
1588         struct vmw_dma_buffer *vmw_bo;
1589         struct vmw_query_cmd {
1590                 SVGA3dCmdHeader header;
1591                 SVGA3dCmdWaitForGBQuery q;
1592         } *cmd;
1593         int ret;
1594
1595         cmd = container_of(header, struct vmw_query_cmd, header);
1596         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1597         if (unlikely(ret != 0))
1598                 return ret;
1599
1600         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1601                                     &cmd->q.mobid,
1602                                     &vmw_bo);
1603         if (unlikely(ret != 0))
1604                 return ret;
1605
1606         vmw_dmabuf_unreference(&vmw_bo);
1607         return 0;
1608 }
1609
1610 /**
1611  * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1612  *
1613  * @dev_priv: Pointer to a device private struct.
1614  * @sw_context: The software context used for this command submission.
1615  * @header: Pointer to the command header in the command stream.
1616  */
1617 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1618                               struct vmw_sw_context *sw_context,
1619                               SVGA3dCmdHeader *header)
1620 {
1621         struct vmw_dma_buffer *vmw_bo;
1622         struct vmw_query_cmd {
1623                 SVGA3dCmdHeader header;
1624                 SVGA3dCmdWaitForQuery q;
1625         } *cmd;
1626         int ret;
1627
1628         cmd = container_of(header, struct vmw_query_cmd, header);
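        /*
         * Rewritten in place to the guest-backed form, as in
         * vmw_cmd_begin_query().
         */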
1629         if (dev_priv->has_mob) {
1630                 struct {
1631                         SVGA3dCmdHeader header;
1632                         SVGA3dCmdWaitForGBQuery q;
1633                 } gb_cmd;
1634
1635                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1636
1637                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1638                 gb_cmd.header.size = cmd->header.size;
1639                 gb_cmd.q.cid = cmd->q.cid;
1640                 gb_cmd.q.type = cmd->q.type;
1641                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1642                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1643
1644                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1645                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1646         }
1647
1648         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1649         if (unlikely(ret != 0))
1650                 return ret;
1651
1652         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1653                                       &cmd->q.guestResult,
1654                                       &vmw_bo);
1655         if (unlikely(ret != 0))
1656                 return ret;
1657
1658         vmw_dmabuf_unreference(&vmw_bo);
1659         return 0;
1660 }
1661
1662 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1663                        struct vmw_sw_context *sw_context,
1664                        SVGA3dCmdHeader *header)
1665 {
1666         struct vmw_dma_buffer *vmw_bo = NULL;
1667         struct vmw_surface *srf = NULL;
1668         struct vmw_dma_cmd {
1669                 SVGA3dCmdHeader header;
1670                 SVGA3dCmdSurfaceDMA dma;
1671         } *cmd;
1672         int ret;
1673         SVGA3dCmdSurfaceDMASuffix *suffix;
1674         uint32_t bo_size;
1675
1676         cmd = container_of(header, struct vmw_dma_cmd, header);
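        /* The DMA suffix is the last member of the command body. */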
1677         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1678                                                header->size - sizeof(*suffix));
1679
1680         /* Make sure device and verifier stay in sync. */
1681         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1682                 DRM_ERROR("Invalid DMA suffix size.\n");
1683                 return -EINVAL;
1684         }
1685
1686         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1687                                       &cmd->dma.guest.ptr,
1688                                       &vmw_bo);
1689         if (unlikely(ret != 0))
1690                 return ret;
1691
1692         /* Make sure DMA doesn't cross BO boundaries. */
1693         bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1694         if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1695                 DRM_ERROR("Invalid DMA offset.\n");
1696                 ret = -EINVAL;
                     goto out_no_surface;
1697         }
1698
1699         bo_size -= cmd->dma.guest.ptr.offset;
1700         if (unlikely(suffix->maximumOffset > bo_size))
1701                 suffix->maximumOffset = bo_size;
1702
1703         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1704                                 user_surface_converter, &cmd->dma.host.sid,
1705                                 NULL);
1706         if (unlikely(ret != 0)) {
1707                 if (unlikely(ret != -ERESTARTSYS))
1708                         DRM_ERROR("could not find surface for DMA.\n");
1709                 goto out_no_surface;
1710         }
1711
1712         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1713
1714         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1715                              header);
1716
1717 out_no_surface:
1718         vmw_dmabuf_unreference(&vmw_bo);
1719         return ret;
1720 }
1721
1722 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1723                         struct vmw_sw_context *sw_context,
1724                         SVGA3dCmdHeader *header)
1725 {
1726         struct vmw_draw_cmd {
1727                 SVGA3dCmdHeader header;
1728                 SVGA3dCmdDrawPrimitives body;
1729         } *cmd;
1730         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1731                 (unsigned long)header + sizeof(*cmd));
1732         SVGA3dPrimitiveRange *range;
1733         uint32_t i;
1734         uint32_t maxnum;
1735         int ret;
1736
1737         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1738         if (unlikely(ret != 0))
1739                 return ret;
1740
1741         cmd = container_of(header, struct vmw_draw_cmd, header);
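        /*
         * The vertex declarations trail the fixed-size body; check the
         * declared count against the space actually left in the command.
         */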
1742         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1743
1744         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1745                 DRM_ERROR("Illegal number of vertex declarations.\n");
1746                 return -EINVAL;
1747         }
1748
1749         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1750                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1751                                         user_surface_converter,
1752                                         &decl->array.surfaceId, NULL);
1753                 if (unlikely(ret != 0))
1754                         return ret;
1755         }
1756
1757         maxnum = (header->size - sizeof(cmd->body) -
1758                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1759         if (unlikely(cmd->body.numRanges > maxnum)) {
1760                 DRM_ERROR("Illegal number of index ranges.\n");
1761                 return -EINVAL;
1762         }
1763
1764         range = (SVGA3dPrimitiveRange *) decl;
1765         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1766                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1767                                         user_surface_converter,
1768                                         &range->indexArray.surfaceId, NULL);
1769                 if (unlikely(ret != 0))
1770                         return ret;
1771         }
1772         return 0;
1773 }
1774
1776 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1777                              struct vmw_sw_context *sw_context,
1778                              SVGA3dCmdHeader *header)
1779 {
1780         struct vmw_tex_state_cmd {
1781                 SVGA3dCmdHeader header;
1782                 SVGA3dCmdSetTextureState state;
1783         } *cmd;
1784
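        /*
         * cur_state and last_state bracket the variable-length array of
         * SVGA3dTextureState entries that follows the fixed command body.
         */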
1785         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1786           ((unsigned long) header + header->size + sizeof(*header));
1787         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1788                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1789         struct vmw_resource_val_node *ctx_node;
1790         struct vmw_resource_val_node *res_node;
1791         int ret;
1792
1793         cmd = container_of(header, struct vmw_tex_state_cmd,
1794                            header);
1795
1796         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1797                                 user_context_converter, &cmd->state.cid,
1798                                 &ctx_node);
1799         if (unlikely(ret != 0))
1800                 return ret;
1801
1802         for (; cur_state < last_state; ++cur_state) {
1803                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1804                         continue;
1805
1806                 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1807                         DRM_ERROR("Illegal texture/sampler unit %u.\n",
1808                                   (unsigned) cur_state->stage);
1809                         return -EINVAL;
1810                 }
1811
1812                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1813                                         user_surface_converter,
1814                                         &cur_state->value, &res_node);
1815                 if (unlikely(ret != 0))
1816                         return ret;
1817
1818                 if (dev_priv->has_mob) {
1819                         struct vmw_ctx_bindinfo_tex binding;
1820
1821                         binding.bi.ctx = ctx_node->res;
1822                         binding.bi.res = res_node ? res_node->res : NULL;
1823                         binding.bi.bt = vmw_ctx_binding_tex;
1824                         binding.texture_stage = cur_state->stage;
1825                         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1826                                         0, binding.texture_stage);
1827                 }
1828         }
1829
1830         return 0;
1831 }
1832
1833 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1834                                       struct vmw_sw_context *sw_context,
1835                                       void *buf)
1836 {
1837         struct vmw_dma_buffer *vmw_bo;
1838         int ret;
1839
1840         struct {
1841                 uint32_t header;
1842                 SVGAFifoCmdDefineGMRFB body;
1843         } *cmd = buf;
1844
1845         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1846                                       &cmd->body.ptr,
1847                                       &vmw_bo);
1848         if (unlikely(ret != 0))
1849                 return ret;
1850
1851         vmw_dmabuf_unreference(&vmw_bo);
1852
1853         return ret;
1854 }
1855
1857 /**
1858  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1859  * switching
1860  *
1861  * @dev_priv: Pointer to a device private struct.
1862  * @sw_context: The software context being used for this batch.
1863  * @val_node: The validation node representing the resource.
1864  * @buf_id: Pointer to the user-space backup buffer handle in the command
1865  * stream.
1866  * @backup_offset: Offset of backup into MOB.
1867  *
1868  * This function prepares for registering a switch of backup buffers
1869  * in the resource metadata just prior to unreserving. The switch is
1870  * staged on the validation node and takes effect on unreserve.
1871  */
1872 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1873                                      struct vmw_sw_context *sw_context,
1874                                      struct vmw_resource_val_node *val_node,
1875                                      uint32_t *buf_id,
1876                                      unsigned long backup_offset)
1877 {
1878         struct vmw_dma_buffer *dma_buf;
1879         int ret;
1880
1881         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1882         if (ret)
1883                 return ret;
1884
1885         val_node->switching_backup = true;
1886         if (val_node->first_usage)
1887                 val_node->no_buffer_needed = true;
1888
1889         vmw_dmabuf_unreference(&val_node->new_backup);
1890         val_node->new_backup = dma_buf;
1891         val_node->new_backup_offset = backup_offset;
1892
1893         return 0;
1894 }
1895
1897 /**
1898  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1899  *
1900  * @dev_priv: Pointer to a device private struct.
1901  * @sw_context: The software context being used for this batch.
1902  * @res_type: The resource type.
1903  * @converter: Information about user-space binding for this resource type.
1904  * @res_id: Pointer to the user-space resource handle in the command stream.
1905  * @buf_id: Pointer to the user-space backup buffer handle in the command
1906  * stream.
1907  * @backup_offset: Offset of backup into MOB.
1908  *
1909  * This function prepares for registering a switch of backup buffers
1910  * in the resource metadata just prior to unreserving. It's basically a wrapper
1911  * around vmw_cmd_res_switch_backup with a different interface.
1912  */
1913 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1914                                  struct vmw_sw_context *sw_context,
1915                                  enum vmw_res_type res_type,
1916                                  const struct vmw_user_resource_conv
1917                                  *converter,
1918                                  uint32_t *res_id,
1919                                  uint32_t *buf_id,
1920                                  unsigned long backup_offset)
1921 {
1922         struct vmw_resource_val_node *val_node;
1923         int ret;
1924
1925         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1926                                 converter, res_id, &val_node);
1927         if (ret)
1928                 return ret;
1929
1930         return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1931                                          buf_id, backup_offset);
1932 }
1933
1934 /**
1935  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1936  * command
1937  *
1938  * @dev_priv: Pointer to a device private struct.
1939  * @sw_context: The software context being used for this batch.
1940  * @header: Pointer to the command header in the command stream.
1941  */
1942 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1943                                    struct vmw_sw_context *sw_context,
1944                                    SVGA3dCmdHeader *header)
1945 {
1946         struct vmw_bind_gb_surface_cmd {
1947                 SVGA3dCmdHeader header;
1948                 SVGA3dCmdBindGBSurface body;
1949         } *cmd;
1950
1951         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1952
1953         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1954                                      user_surface_converter,
1955                                      &cmd->body.sid, &cmd->body.mobid,
1956                                      0);
1957 }
1958
1959 /**
1960  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1961  * command
1962  *
1963  * @dev_priv: Pointer to a device private struct.
1964  * @sw_context: The software context being used for this batch.
1965  * @header: Pointer to the command header in the command stream.
1966  */
1967 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1968                                    struct vmw_sw_context *sw_context,
1969                                    SVGA3dCmdHeader *header)
1970 {
1971         struct vmw_gb_surface_cmd {
1972                 SVGA3dCmdHeader header;
1973                 SVGA3dCmdUpdateGBImage body;
1974         } *cmd;
1975
1976         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1977
1978         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1979                                  user_surface_converter,
1980                                  &cmd->body.image.sid, NULL);
1981 }
1982
1983 /**
1984  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1985  * command
1986  *
1987  * @dev_priv: Pointer to a device private struct.
1988  * @sw_context: The software context being used for this batch.
1989  * @header: Pointer to the command header in the command stream.
1990  */
1991 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1992                                      struct vmw_sw_context *sw_context,
1993                                      SVGA3dCmdHeader *header)
1994 {
1995         struct vmw_gb_surface_cmd {
1996                 SVGA3dCmdHeader header;
1997                 SVGA3dCmdUpdateGBSurface body;
1998         } *cmd;
1999
2000         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2001
2002         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2003                                  user_surface_converter,
2004                                  &cmd->body.sid, NULL);
2005 }
2006
2007 /**
2008  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2009  * command
2010  *
2011  * @dev_priv: Pointer to a device private struct.
2012  * @sw_context: The software context being used for this batch.
2013  * @header: Pointer to the command header in the command stream.
2014  */
2015 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2016                                      struct vmw_sw_context *sw_context,
2017                                      SVGA3dCmdHeader *header)
2018 {
2019         struct vmw_gb_surface_cmd {
2020                 SVGA3dCmdHeader header;
2021                 SVGA3dCmdReadbackGBImage body;
2022         } *cmd;
2023
2024         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2025
2026         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2027                                  user_surface_converter,
2028                                  &cmd->body.image.sid, NULL);
2029 }
2030
2031 /**
2032  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2033  * command
2034  *
2035  * @dev_priv: Pointer to a device private struct.
2036  * @sw_context: The software context being used for this batch.
2037  * @header: Pointer to the command header in the command stream.
2038  */
2039 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2040                                        struct vmw_sw_context *sw_context,
2041                                        SVGA3dCmdHeader *header)
2042 {
2043         struct vmw_gb_surface_cmd {
2044                 SVGA3dCmdHeader header;
2045                 SVGA3dCmdReadbackGBSurface body;
2046         } *cmd;
2047
2048         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2049
2050         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2051                                  user_surface_converter,
2052                                  &cmd->body.sid, NULL);
2053 }
2054
2055 /**
2056  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2057  * command
2058  *
2059  * @dev_priv: Pointer to a device private struct.
2060  * @sw_context: The software context being used for this batch.
2061  * @header: Pointer to the command header in the command stream.
2062  */
2063 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2064                                        struct vmw_sw_context *sw_context,
2065                                        SVGA3dCmdHeader *header)
2066 {
2067         struct vmw_gb_surface_cmd {
2068                 SVGA3dCmdHeader header;
2069                 SVGA3dCmdInvalidateGBImage body;
2070         } *cmd;
2071
2072         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2073
2074         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2075                                  user_surface_converter,
2076                                  &cmd->body.image.sid, NULL);
2077 }
2078
2079 /**
2080  * vmw_cmd_invalidate_gb_surface - Validate an
2081  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2082  *
2083  * @dev_priv: Pointer to a device private struct.
2084  * @sw_context: The software context being used for this batch.
2085  * @header: Pointer to the command header in the command stream.
2086  */
2087 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2088                                          struct vmw_sw_context *sw_context,
2089                                          SVGA3dCmdHeader *header)
2090 {
2091         struct vmw_gb_surface_cmd {
2092                 SVGA3dCmdHeader header;
2093                 SVGA3dCmdInvalidateGBSurface body;
2094         } *cmd;
2095
2096         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2097
2098         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2099                                  user_surface_converter,
2100                                  &cmd->body.sid, NULL);
2101 }
2102
2104 /**
2105  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2106  * command
2107  *
2108  * @dev_priv: Pointer to a device private struct.
2109  * @sw_context: The software context being used for this batch.
2110  * @header: Pointer to the command header in the command stream.
2111  */
2112 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2113                                  struct vmw_sw_context *sw_context,
2114                                  SVGA3dCmdHeader *header)
2115 {
2116         struct vmw_shader_define_cmd {
2117                 SVGA3dCmdHeader header;
2118                 SVGA3dCmdDefineShader body;
2119         } *cmd;
2120         int ret;
2121         size_t size;
2122         struct vmw_resource_val_node *val;
2123
2124         cmd = container_of(header, struct vmw_shader_define_cmd,
2125                            header);
2126
2127         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2128                                 user_context_converter, &cmd->body.cid,
2129                                 &val);
2130         if (unlikely(ret != 0))
2131                 return ret;
2132
2133         if (unlikely(!dev_priv->has_mob))
2134                 return 0;
2135
2136         size = cmd->header.size - sizeof(cmd->body);
2137         ret = vmw_compat_shader_add(dev_priv,
2138                                     vmw_context_res_man(val->res),
2139                                     cmd->body.shid, cmd + 1,
2140                                     cmd->body.type, size,
2141                                     &sw_context->staged_cmd_res);
2142         if (unlikely(ret != 0))
2143                 return ret;
2144
2145         return vmw_resource_relocation_add(&sw_context->res_relocations,
2146                                            NULL, &cmd->header.id -
2147                                            sw_context->buf_start);
2150 }
2151
2152 /**
2153  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2154  * command
2155  *
2156  * @dev_priv: Pointer to a device private struct.
2157  * @sw_context: The software context being used for this batch.
2158  * @header: Pointer to the command header in the command stream.
2159  */
2160 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2161                                   struct vmw_sw_context *sw_context,
2162                                   SVGA3dCmdHeader *header)
2163 {
2164         struct vmw_shader_destroy_cmd {
2165                 SVGA3dCmdHeader header;
2166                 SVGA3dCmdDestroyShader body;
2167         } *cmd;
2168         int ret;
2169         struct vmw_resource_val_node *val;
2170
2171         cmd = container_of(header, struct vmw_shader_destroy_cmd,
2172                            header);
2173
2174         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2175                                 user_context_converter, &cmd->body.cid,
2176                                 &val);
2177         if (unlikely(ret != 0))
2178                 return ret;
2179
2180         if (unlikely(!dev_priv->has_mob))
2181                 return 0;
2182
2183         ret = vmw_shader_remove(vmw_context_res_man(val->res),
2184                                 cmd->body.shid,
2185                                 cmd->body.type,
2186                                 &sw_context->staged_cmd_res);
2187         if (unlikely(ret != 0))
2188                 return ret;
2189
2190         return vmw_resource_relocation_add(&sw_context->res_relocations,
2191                                            NULL, &cmd->header.id -
2192                                            sw_context->buf_start);
2195 }
2196
2197 /**
2198  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2199  * command
2200  *
2201  * @dev_priv: Pointer to a device private struct.
2202  * @sw_context: The software context being used for this batch.
2203  * @header: Pointer to the command header in the command stream.
2204  */
2205 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2206                               struct vmw_sw_context *sw_context,
2207                               SVGA3dCmdHeader *header)
2208 {
2209         struct vmw_set_shader_cmd {
2210                 SVGA3dCmdHeader header;
2211                 SVGA3dCmdSetShader body;
2212         } *cmd;
2213         struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2214         struct vmw_ctx_bindinfo_shader binding;
2215         struct vmw_resource *res = NULL;
2216         int ret;
2217
2218         cmd = container_of(header, struct vmw_set_shader_cmd,
2219                            header);
2220
2221         if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2222                 DRM_ERROR("Illegal shader type %u.\n",
2223                           (unsigned) cmd->body.type);
2224                 return -EINVAL;
2225         }
2226
2227         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228                                 user_context_converter, &cmd->body.cid,
2229                                 &ctx_node);
2230         if (unlikely(ret != 0))
2231                 return ret;
2232
2233         if (!dev_priv->has_mob)
2234                 return 0;
2235
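        /*
         * Look for the shader among the shaders added through
         * vmw_cmd_shader_define() first; fall back to a user-space
         * shader resource lookup if it is not found there.
         */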
2236         if (cmd->body.shid != SVGA3D_INVALID_ID) {
2237                 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2238                                         cmd->body.shid,
2239                                         cmd->body.type);
2240
2241                 if (!IS_ERR(res)) {
2242                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2243                                                     &cmd->body.shid, res,
2244                                                     &res_node);
2245                         vmw_resource_unreference(&res);
2246                         if (unlikely(ret != 0))
2247                                 return ret;
2248                 }
2249         }
2250
2251         if (!res_node) {
2252                 ret = vmw_cmd_res_check(dev_priv, sw_context,
2253                                         vmw_res_shader,
2254                                         user_shader_converter,
2255                                         &cmd->body.shid, &res_node);
2256                 if (unlikely(ret != 0))
2257                         return ret;
2258         }
2259
2260         binding.bi.ctx = ctx_node->res;
2261         binding.bi.res = res_node ? res_node->res : NULL;
2262         binding.bi.bt = vmw_ctx_binding_shader;
2263         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2264         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2265                         binding.shader_slot, 0);
2266         return 0;
2267 }
2268
2269 /**
2270  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2271  * command
2272  *
2273  * @dev_priv: Pointer to a device private struct.
2274  * @sw_context: The software context being used for this batch.
2275  * @header: Pointer to the command header in the command stream.
2276  */
2277 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2278                                     struct vmw_sw_context *sw_context,
2279                                     SVGA3dCmdHeader *header)
2280 {
2281         struct vmw_set_shader_const_cmd {
2282                 SVGA3dCmdHeader header;
2283                 SVGA3dCmdSetShaderConst body;
2284         } *cmd;
2285         int ret;
2286
2287         cmd = container_of(header, struct vmw_set_shader_const_cmd,
2288                            header);
2289
2290         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2291                                 user_context_converter, &cmd->body.cid,
2292                                 NULL);
2293         if (unlikely(ret != 0))
2294                 return ret;
2295
2296         if (dev_priv->has_mob)
2297                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2298
2299         return 0;
2300 }
2301
2302 /**
2303  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2304  * command
2305  *
2306  * @dev_priv: Pointer to a device private struct.
2307  * @sw_context: The software context being used for this batch.
2308  * @header: Pointer to the command header in the command stream.
2309  */
2310 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2311                                   struct vmw_sw_context *sw_context,
2312                                   SVGA3dCmdHeader *header)
2313 {
2314         struct vmw_bind_gb_shader_cmd {
2315                 SVGA3dCmdHeader header;
2316                 SVGA3dCmdBindGBShader body;
2317         } *cmd;
2318
2319         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2320                            header);
2321
2322         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2323                                      user_shader_converter,
2324                                      &cmd->body.shid, &cmd->body.mobid,
2325                                      cmd->body.offsetInBytes);
2326 }
2327
2328 /**
2329  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2330  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2331  *
2332  * @dev_priv: Pointer to a device private struct.
2333  * @sw_context: The software context being used for this batch.
2334  * @header: Pointer to the command header in the command stream.
2335  */
2336 static int
2337 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2338                                       struct vmw_sw_context *sw_context,
2339                                       SVGA3dCmdHeader *header)
2340 {
2341         struct {
2342                 SVGA3dCmdHeader header;
2343                 SVGA3dCmdDXSetSingleConstantBuffer body;
2344         } *cmd;
2345         struct vmw_resource_val_node *res_node = NULL;
2346         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2347         struct vmw_ctx_bindinfo_cb binding;
2348         int ret;
2349
2350         if (unlikely(ctx_node == NULL)) {
2351                 DRM_ERROR("DX Context not set.\n");
2352                 return -EINVAL;
2353         }
2354
2355         cmd = container_of(header, typeof(*cmd), header);
2356         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2357                                 user_surface_converter,
2358                                 &cmd->body.sid, &res_node);
2359         if (unlikely(ret != 0))
2360                 return ret;
2361
2362         binding.bi.ctx = ctx_node->res;
2363         binding.bi.res = res_node ? res_node->res : NULL;
2364         binding.bi.bt = vmw_ctx_binding_cb;
2365         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2366         binding.offset = cmd->body.offsetInBytes;
2367         binding.size = cmd->body.sizeInBytes;
2368         binding.slot = cmd->body.slot;
2369
2370         if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2371             binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2372                 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2373                           (unsigned) cmd->body.type,
2374                           (unsigned) binding.slot);
2375                 return -EINVAL;
2376         }
2377
2378         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2379                         binding.shader_slot, binding.slot);
2380
2381         return 0;
2382 }
2383
2384 /**
2385  * vmw_cmd_dx_set_shader_res - Validate an
2386  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2387  *
2388  * @dev_priv: Pointer to a device private struct.
2389  * @sw_context: The software context being used for this batch.
2390  * @header: Pointer to the command header in the command stream.
2391  */
2392 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2393                                      struct vmw_sw_context *sw_context,
2394                                      SVGA3dCmdHeader *header)
2395 {
2396         struct {
2397                 SVGA3dCmdHeader header;
2398                 SVGA3dCmdDXSetShaderResources body;
2399         } *cmd = container_of(header, typeof(*cmd), header);
2400         u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2401                 sizeof(SVGA3dShaderResourceViewId);
2402
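        /* The view id array trails the fixed-size command body. */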
2403         if ((u64) cmd->body.startView + (u64) num_sr_view >
2404             (u64) SVGA3D_DX_MAX_SRVIEWS ||
2405             cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2406                 DRM_ERROR("Invalid shader binding.\n");
2407                 return -EINVAL;
2408         }
2409
2410         return vmw_view_bindings_add(sw_context, vmw_view_sr,
2411                                      vmw_ctx_binding_sr,
2412                                      cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2413                                      (void *) &cmd[1], num_sr_view,
2414                                      cmd->body.startView);
2415 }
2416
2417 /**
2418  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2419  * command
2420  *
2421  * @dev_priv: Pointer to a device private struct.
2422  * @sw_context: The software context being used for this batch.
2423  * @header: Pointer to the command header in the command stream.
2424  */
2425 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2426                                  struct vmw_sw_context *sw_context,
2427                                  SVGA3dCmdHeader *header)
2428 {
2429         struct {
2430                 SVGA3dCmdHeader header;
2431                 SVGA3dCmdDXSetShader body;
2432         } *cmd;
2433         struct vmw_resource *res = NULL;
2434         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2435         struct vmw_ctx_bindinfo_shader binding;
2436         int ret = 0;
2437
2438         if (unlikely(ctx_node == NULL)) {
2439                 DRM_ERROR("DX Context not set.\n");
2440                 return -EINVAL;
2441         }
2442
2443         cmd = container_of(header, typeof(*cmd), header);
2444
2445         if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2446                 DRM_ERROR("Illegal shader type %u.\n",
2447                           (unsigned) cmd->body.type);
2448                 return -EINVAL;
2449         }
2450
2451         if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2452                 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2453                 if (IS_ERR(res)) {
2454                         DRM_ERROR("Could not find shader for binding.\n");
2455                         return PTR_ERR(res);
2456                 }
2457
2458                 ret = vmw_resource_val_add(sw_context, res, NULL);
2459                 if (ret)
2460                         goto out_unref;
2461         }
2462
2463         binding.bi.ctx = ctx_node->res;
2464         binding.bi.res = res;
2465         binding.bi.bt = vmw_ctx_binding_dx_shader;
2466         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2467
2468         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2469                         binding.shader_slot, 0);
2470 out_unref:
2471         if (res)
2472                 vmw_resource_unreference(&res);
2473
2474         return ret;
2475 }
2476
2477 /**
2478  * vmw_cmd_dx_set_vertex_buffers - Validate an
2479  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2480  *
2481  * @dev_priv: Pointer to a device private struct.
2482  * @sw_context: The software context being used for this batch.
2483  * @header: Pointer to the command header in the command stream.
2484  */
2485 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2486                                          struct vmw_sw_context *sw_context,
2487                                          SVGA3dCmdHeader *header)
2488 {
2489         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2490         struct vmw_ctx_bindinfo_vb binding;
2491         struct vmw_resource_val_node *res_node;
2492         struct {
2493                 SVGA3dCmdHeader header;
2494                 SVGA3dCmdDXSetVertexBuffers body;
2495                 SVGA3dVertexBuffer buf[];
2496         } *cmd;
2497         int i, ret, num;
2498
2499         if (unlikely(ctx_node == NULL)) {
2500                 DRM_ERROR("DX Context not set.\n");
2501                 return -EINVAL;
2502         }
2503
2504         cmd = container_of(header, typeof(*cmd), header);
2505         num = (cmd->header.size - sizeof(cmd->body)) /
2506                 sizeof(SVGA3dVertexBuffer);
2507         if ((u64)num + (u64)cmd->body.startBuffer >
2508             (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2509                 DRM_ERROR("Invalid number of vertex buffers.\n");
2510                 return -EINVAL;
2511         }
2512
2513         for (i = 0; i < num; i++) {
2514                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2515                                         user_surface_converter,
2516                                         &cmd->buf[i].sid, &res_node);
2517                 if (unlikely(ret != 0))
2518                         return ret;
2519
2520                 binding.bi.ctx = ctx_node->res;
2521                 binding.bi.bt = vmw_ctx_binding_vb;
2522                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2523                 binding.offset = cmd->buf[i].offset;
2524                 binding.stride = cmd->buf[i].stride;
2525                 binding.slot = i + cmd->body.startBuffer;
2526
2527                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2528                                 0, binding.slot);
2529         }
2530
2531         return 0;
2532 }
2533
2534 /**
2535  * vmw_cmd_dx_set_index_buffer - Validate an
2536  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2537  *
2538  * @dev_priv: Pointer to a device private struct.
2539  * @sw_context: The software context being used for this batch.
2540  * @header: Pointer to the command header in the command stream.
2541  */
2542 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2543                                        struct vmw_sw_context *sw_context,
2544                                        SVGA3dCmdHeader *header)
2545 {
2546         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2547         struct vmw_ctx_bindinfo_ib binding;
2548         struct vmw_resource_val_node *res_node;
2549         struct {
2550                 SVGA3dCmdHeader header;
2551                 SVGA3dCmdDXSetIndexBuffer body;
2552         } *cmd;
2553         int ret;
2554
2555         if (unlikely(ctx_node == NULL)) {
2556                 DRM_ERROR("DX Context not set.\n");
2557                 return -EINVAL;
2558         }
2559
2560         cmd = container_of(header, typeof(*cmd), header);
2561         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2562                                 user_surface_converter,
2563                                 &cmd->body.sid, &res_node);
2564         if (unlikely(ret != 0))
2565                 return ret;
2566
2567         binding.bi.ctx = ctx_node->res;
2568         binding.bi.res = ((res_node) ? res_node->res : NULL);
2569         binding.bi.bt = vmw_ctx_binding_ib;
2570         binding.offset = cmd->body.offset;
2571         binding.format = cmd->body.format;
2572
2573         vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2574
2575         return 0;
2576 }
2577
2578 /**
2579  * vmw_cmd_dx_set_rendertargets - Validate an
2580  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2581  *
2582  * @dev_priv: Pointer to a device private struct.
2583  * @sw_context: The software context being used for this batch.
2584  * @header: Pointer to the command header in the command stream.
2585  */
2586 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2587                                         struct vmw_sw_context *sw_context,
2588                                         SVGA3dCmdHeader *header)
2589 {
2590         struct {
2591                 SVGA3dCmdHeader header;
2592                 SVGA3dCmdDXSetRenderTargets body;
2593         } *cmd = container_of(header, typeof(*cmd), header);
2594         int ret;
2595         u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2596                 sizeof(SVGA3dRenderTargetViewId);
2597
2598         if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2599                 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2600                 return -EINVAL;
2601         }
2602
2603         ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2604                                     vmw_ctx_binding_ds, 0,
2605                                     &cmd->body.depthStencilViewId, 1, 0);
2606         if (ret)
2607                 return ret;
2608
2609         return vmw_view_bindings_add(sw_context, vmw_view_rt,
2610                                      vmw_ctx_binding_dx_rt, 0,
2611                                      (void *)&cmd[1], num_rt_view, 0);
2612 }
2613
2614 /**
2615  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2616  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2617  *
2618  * @dev_priv: Pointer to a device private struct.
2619  * @sw_context: The software context being used for this batch.
2620  * @header: Pointer to the command header in the command stream.
2621  */
2622 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2623                                               struct vmw_sw_context *sw_context,
2624                                               SVGA3dCmdHeader *header)
2625 {
2626         struct {
2627                 SVGA3dCmdHeader header;
2628                 SVGA3dCmdDXClearRenderTargetView body;
2629         } *cmd = container_of(header, typeof(*cmd), header);
2630
2631         return vmw_view_id_val_add(sw_context, vmw_view_rt,
2632                                    cmd->body.renderTargetViewId);
2633 }
2634
2635 /**
2636  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2637  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2638  *
2639  * @dev_priv: Pointer to a device private struct.
2640  * @sw_context: The software context being used for this batch.
2641  * @header: Pointer to the command header in the command stream.
2642  */
2643 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2644                                               struct vmw_sw_context *sw_context,
2645                                               SVGA3dCmdHeader *header)
2646 {
2647         struct {
2648                 SVGA3dCmdHeader header;
2649                 SVGA3dCmdDXClearDepthStencilView body;
2650         } *cmd = container_of(header, typeof(*cmd), header);
2651
2652         return vmw_view_id_val_add(sw_context, vmw_view_ds,
2653                                    cmd->body.depthStencilViewId);
2654 }
2655
2656 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2657                                   struct vmw_sw_context *sw_context,
2658                                   SVGA3dCmdHeader *header)
2659 {
2660         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2661         struct vmw_resource_val_node *srf_node;
2662         struct vmw_resource *res;
2663         enum vmw_view_type view_type;
2664         int ret;
2665         /*
2666          * This is based on the fact that all affected define commands have
2667          * the same initial command body layout.
2668          */
2669         struct {
2670                 SVGA3dCmdHeader header;
2671                 uint32 defined_id;
2672                 uint32 sid;
2673         } *cmd;
2674
2675         if (unlikely(ctx_node == NULL)) {
2676                 DRM_ERROR("DX Context not set.\n");
2677                 return -EINVAL;
2678         }
2679
2680         view_type = vmw_view_cmd_to_type(header->id);
2681         cmd = container_of(header, typeof(*cmd), header);
2682         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2683                                 user_surface_converter,
2684                                 &cmd->sid, &srf_node);
2685         if (unlikely(ret != 0))
2686                 return ret;
2687
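        /*
         * Give the cotable holding this view type a chance to resize
         * before the define reaches the device.
         */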
2688         res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2689         ret = vmw_cotable_notify(res, cmd->defined_id);
2690         vmw_resource_unreference(&res);
2691         if (unlikely(ret != 0))
2692                 return ret;
2693
2694         return vmw_view_add(sw_context->man,
2695                             ctx_node->res,
2696                             srf_node->res,
2697                             view_type,
2698                             cmd->defined_id,
2699                             header,
2700                             header->size + sizeof(*header),
2701                             &sw_context->staged_cmd_res);
2702 }
2703
2704 /**
2705  * vmw_cmd_dx_set_so_targets - Validate an
2706  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2707  *
2708  * @dev_priv: Pointer to a device private struct.
2709  * @sw_context: The software context being used for this batch.
2710  * @header: Pointer to the command header in the command stream.
2711  */
2712 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2713                                      struct vmw_sw_context *sw_context,
2714                                      SVGA3dCmdHeader *header)
2715 {
2716         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2717         struct vmw_ctx_bindinfo_so binding;
2718         struct vmw_resource_val_node *res_node;
2719         struct {
2720                 SVGA3dCmdHeader header;
2721                 SVGA3dCmdDXSetSOTargets body;
2722                 SVGA3dSoTarget targets[];
2723         } *cmd;
2724         int i, ret, num;
2725
2726         if (unlikely(ctx_node == NULL)) {
2727                 DRM_ERROR("DX Context not set.\n");
2728                 return -EINVAL;
2729         }
2730
2731         cmd = container_of(header, typeof(*cmd), header);
2732         num = (cmd->header.size - sizeof(cmd->body)) /
2733                 sizeof(SVGA3dSoTarget);
2734
2735         if (num > SVGA3D_DX_MAX_SOTARGETS) {
2736                 DRM_ERROR("Invalid DX SO binding.\n");
2737                 return -EINVAL;
2738         }
2739
2740         for (i = 0; i < num; i++) {
2741                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2742                                         user_surface_converter,
2743                                         &cmd->targets[i].sid, &res_node);
2744                 if (unlikely(ret != 0))
2745                         return ret;
2746
2747                 binding.bi.ctx = ctx_node->res;
2748                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2749                 binding.bi.bt = vmw_ctx_binding_so;
2750                 binding.offset = cmd->targets[i].offset;
2751                 binding.size = cmd->targets[i].sizeInBytes;
2752                 binding.slot = i;
2753
2754                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2755                                 0, binding.slot);
2756         }
2757
2758         return 0;
2759 }
2760
2761 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2762                                 struct vmw_sw_context *sw_context,
2763                                 SVGA3dCmdHeader *header)
2764 {
2765         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2766         struct vmw_resource *res;
2767         /*
2768          * This is based on the fact that all affected define commands have
2769          * the same initial command body layout.
2770          */
2771         struct {
2772                 SVGA3dCmdHeader header;
2773                 uint32 defined_id;
2774         } *cmd;
2775         enum vmw_so_type so_type;
2776         int ret;
2777
2778         if (unlikely(ctx_node == NULL)) {
2779                 DRM_ERROR("DX Context not set.\n");
2780                 return -EINVAL;
2781         }
2782
2783         so_type = vmw_so_cmd_to_type(header->id);
2784         res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2785         cmd = container_of(header, typeof(*cmd), header);
2786         ret = vmw_cotable_notify(res, cmd->defined_id);
2787         vmw_resource_unreference(&res);
2788
2789         return ret;
2790 }
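
/*
 * Layout sketch for the assumption above: each affected
 * SVGA_3D_CMD_DX_DEFINE_* body starts with the 32-bit id being defined
 * (trailing fields are illustrative only):
 *
 *   SVGA3dCmdDXDefineBlendState   { uint32 blendId;   ... };
 *   SVGA3dCmdDXDefineSamplerState { uint32 samplerId; ... };
 *
 * so reading the first uint32 of any of them as @defined_id is safe.
 * A BUILD_BUG_ON() per affected body, as done in
 * vmw_cmd_dx_check_subresource() below, would turn this assumption
 * into a compile-time guarantee.
 */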
2791
2792 /**
2793  * vmw_cmd_dx_check_subresource - Validate an
2794  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2795  *
2796  * @dev_priv: Pointer to a device private struct.
2797  * @sw_context: The software context being used for this batch.
2798  * @header: Pointer to the command header in the command stream.
2799  */
2800 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2801                                         struct vmw_sw_context *sw_context,
2802                                         SVGA3dCmdHeader *header)
2803 {
2804         struct {
2805                 SVGA3dCmdHeader header;
2806                 union {
2807                         SVGA3dCmdDXReadbackSubResource r_body;
2808                         SVGA3dCmdDXInvalidateSubResource i_body;
2809                         SVGA3dCmdDXUpdateSubResource u_body;
2810                         SVGA3dSurfaceId sid;
2811                 };
2812         } *cmd;
2813
2814         BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2815                      offsetof(typeof(*cmd), sid));
2816         BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2817                      offsetof(typeof(*cmd), sid));
2818         BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2819                      offsetof(typeof(*cmd), sid));
2820
2821         cmd = container_of(header, typeof(*cmd), header);
2822
2823         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2824                                  user_surface_converter,
2825                                  &cmd->sid, NULL);
2826 }
2827
2828 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2829                                 struct vmw_sw_context *sw_context,
2830                                 SVGA3dCmdHeader *header)
2831 {
2832         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2833
2834         if (unlikely(ctx_node == NULL)) {
2835                 DRM_ERROR("DX Context not set.\n");
2836                 return -EINVAL;
2837         }
2838
2839         return 0;
2840 }
2841
2842 /**
2843  * vmw_cmd_dx_view_remove - Validate a view remove command and
2844  * schedule the view resource for removal.
2845  *
2846  * @dev_priv: Pointer to a device private struct.
2847  * @sw_context: The software context being used for this batch.
2848  * @header: Pointer to the command header in the command stream.
2849  *
2850  * Check that the view exists, and if it was not created using this
2851  * command batch, make sure it's validated (present in the device) so that
2852  * the remove command will not confuse the device.
2853  */
2854 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2855                                   struct vmw_sw_context *sw_context,
2856                                   SVGA3dCmdHeader *header)
2857 {
2858         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2859         struct {
2860                 SVGA3dCmdHeader header;
2861                 union vmw_view_destroy body;
2862         } *cmd = container_of(header, typeof(*cmd), header);
2863         enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2864         struct vmw_resource *view;
2865         int ret;
2866
2867         if (!ctx_node) {
2868                 DRM_ERROR("DX Context not set.\n");
2869                 return -EINVAL;
2870         }
2871
2872         ret = vmw_view_remove(sw_context->man,
2873                               cmd->body.view_id, view_type,
2874                               &sw_context->staged_cmd_res,
2875                               &view);
2876         if (ret || !view)
2877                 return ret;
2878
2879         /*
2880          * Add view to the validate list iff it was not created using this
2881          * command batch.
2882          */
2883         return vmw_view_res_val_add(sw_context, view);
2884 }
2885
2886 /**
2887  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2888  * command
2889  *
2890  * @dev_priv: Pointer to a device private struct.
2891  * @sw_context: The software context being used for this batch.
2892  * @header: Pointer to the command header in the command stream.
2893  */
2894 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2895                                     struct vmw_sw_context *sw_context,
2896                                     SVGA3dCmdHeader *header)
2897 {
2898         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2899         struct vmw_resource *res;
2900         struct {
2901                 SVGA3dCmdHeader header;
2902                 SVGA3dCmdDXDefineShader body;
2903         } *cmd = container_of(header, typeof(*cmd), header);
2904         int ret;
2905
2906         if (!ctx_node) {
2907                 DRM_ERROR("DX Context not set.\n");
2908                 return -EINVAL;
2909         }
2910
2911         res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2912         ret = vmw_cotable_notify(res, cmd->body.shaderId);
2913         vmw_resource_unreference(&res);
2914         if (ret)
2915                 return ret;
2916
2917         return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2918                                  cmd->body.shaderId, cmd->body.type,
2919                                  &sw_context->staged_cmd_res);
2920 }
2921
2922 /**
2923  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2924  * command
2925  *
2926  * @dev_priv: Pointer to a device private struct.
2927  * @sw_context: The software context being used for this batch.
2928  * @header: Pointer to the command header in the command stream.
2929  */
2930 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2931                                      struct vmw_sw_context *sw_context,
2932                                      SVGA3dCmdHeader *header)
2933 {
2934         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2935         struct {
2936                 SVGA3dCmdHeader header;
2937                 SVGA3dCmdDXDestroyShader body;
2938         } *cmd = container_of(header, typeof(*cmd), header);
2939         int ret;
2940
2941         if (!ctx_node) {
2942                 DRM_ERROR("DX Context not set.\n");
2943                 return -EINVAL;
2944         }
2945
2946         ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2947                                 &sw_context->staged_cmd_res);
2948         if (ret)
2949                 DRM_ERROR("Could not find shader to remove.\n");
2950
2951         return ret;
2952 }
2953
2954 /**
2955  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2956  * command
2957  *
2958  * @dev_priv: Pointer to a device private struct.
2959  * @sw_context: The software context being used for this batch.
2960  * @header: Pointer to the command header in the command stream.
2961  */
2962 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2963                                   struct vmw_sw_context *sw_context,
2964                                   SVGA3dCmdHeader *header)
2965 {
2966         struct vmw_resource_val_node *ctx_node;
2967         struct vmw_resource_val_node *res_node;
2968         struct vmw_resource *res;
2969         struct {
2970                 SVGA3dCmdHeader header;
2971                 SVGA3dCmdDXBindShader body;
2972         } *cmd = container_of(header, typeof(*cmd), header);
2973         int ret;
2974
2975         if (cmd->body.cid != SVGA3D_INVALID_ID) {
2976                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2977                                         user_context_converter,
2978                                         &cmd->body.cid, &ctx_node);
2979                 if (ret)
2980                         return ret;
2981         } else {
2982                 ctx_node = sw_context->dx_ctx_node;
2983                 if (!ctx_node) {
2984                         DRM_ERROR("DX Context not set.\n");
2985                         return -EINVAL;
2986                 }
2987         }
2988
2989         res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2990                                 cmd->body.shid, 0);
2991         if (IS_ERR(res)) {
2992                 DRM_ERROR("Could not find shader to bind.\n");
2993                 return PTR_ERR(res);
2994         }
2995
2996         ret = vmw_resource_val_add(sw_context, res, &res_node);
2997         if (ret) {
2998                 DRM_ERROR("Error creating resource validation node.\n");
2999                 goto out_unref;
3000         }
3001
3002
3003         ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3004                                         &cmd->body.mobid,
3005                                         cmd->body.offsetInBytes);
3006 out_unref:
3007         vmw_resource_unreference(&res);
3008
3009         return ret;
3010 }
3011
3012 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3013                                 struct vmw_sw_context *sw_context,
3014                                 void *buf, uint32_t *size)
3015 {
3016         uint32_t size_remaining = *size;
3017         uint32_t cmd_id;
3018
3019         cmd_id = ((uint32_t *)buf)[0];
3020         switch (cmd_id) {
3021         case SVGA_CMD_UPDATE:
3022                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3023                 break;
3024         case SVGA_CMD_DEFINE_GMRFB:
3025                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3026                 break;
3027         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3028                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3029                 break;
3030         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3031                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3032                 break;
3033         default:
3034                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3035                 return -EINVAL;
3036         }
3037
3038         if (*size > size_remaining) {
3039                 DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
3040                           cmd_id);
3041                 return -EINVAL;
3042         }
3043
3044         if (unlikely(!sw_context->kernel)) {
3045                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3046                 return -EPERM;
3047         }
3048
3049         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3050                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3051
3052         return 0;
3053 }
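
/*
 * Wire-format sketch for the legacy commands handled above: a non-3D
 * SVGA command is a bare 32-bit id followed directly by a fixed-size
 * body, with no SVGA3dCmdHeader. An update command in the stream would
 * look like:
 *
 *   uint32_t stream[] = {
 *           SVGA_CMD_UPDATE,
 *           x, y, width, height,   <- SVGAFifoCmdUpdate body
 *   };
 *
 * which is why *size is computed as sizeof(uint32_t) plus the body
 * size instead of being read from a header.
 */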
3054
3055 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3056         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3057                     false, false, false),
3058         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3059                     false, false, false),
3060         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3061                     true, false, false),
3062         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3063                     true, false, false),
3064         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3065                     true, false, false),
3066         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3067                     false, false, false),
3068         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3069                     false, false, false),
3070         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3071                     true, false, false),
3072         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3073                     true, false, false),
3074         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3075                     true, false, false),
3076         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3077                     &vmw_cmd_set_render_target_check, true, false, false),
3078         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3079                     true, false, false),
3080         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3081                     true, false, false),
3082         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3083                     true, false, false),
3084         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3085                     true, false, false),
3086         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3087                     true, false, false),
3088         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3089                     true, false, false),
3090         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3091                     true, false, false),
3092         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3093                     false, false, false),
3094         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3095                     true, false, false),
3096         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3097                     true, false, false),
3098         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3099                     true, false, false),
3100         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3101                     true, false, false),
3102         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3103                     true, false, false),
3104         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3105                     true, false, false),
3106         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3107                     true, false, false),
3108         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3109                     true, false, false),
3110         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3111                     true, false, false),
3112         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3113                     true, false, false),
3114         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3115                     &vmw_cmd_blt_surf_screen_check, false, false, false),
3116         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3117                     false, false, false),
3118         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3119                     false, false, false),
3120         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3121                     false, false, false),
3122         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3123                     false, false, false),
3124         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3125                     false, false, false),
3126         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3127                     false, false, false),
3128         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3129                     false, false, false),
3130         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3131                     false, false, false),
3132         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3133                     false, false, false),
3134         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3135                     false, false, false),
3136         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3137                     false, false, false),
3138         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3139                     false, false, false),
3140         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3141                     false, false, false),
3142         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3143                     false, false, true),
3144         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3145                     false, false, true),
3146         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3147                     false, false, true),
3148         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3149                     false, false, true),
3150         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3151                     false, false, true),
3152         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3153                     false, false, true),
3154         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3155                     false, false, true),
3156         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3157                     false, false, true),
3158         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3159                     true, false, true),
3160         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3161                     false, false, true),
3162         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3163                     true, false, true),
3164         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3165                     &vmw_cmd_update_gb_surface, true, false, true),
3166         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3167                     &vmw_cmd_readback_gb_image, true, false, true),
3168         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3169                     &vmw_cmd_readback_gb_surface, true, false, true),
3170         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3171                     &vmw_cmd_invalidate_gb_image, true, false, true),
3172         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3173                     &vmw_cmd_invalidate_gb_surface, true, false, true),
3174         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3175                     false, false, true),
3176         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3177                     false, false, true),
3178         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3179                     false, false, true),
3180         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3181                     false, false, true),
3182         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3183                     false, false, true),
3184         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3185                     false, false, true),
3186         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3187                     true, false, true),
3188         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3189                     false, false, true),
3190         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3191                     false, false, false),
3192         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3193                     true, false, true),
3194         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3195                     true, false, true),
3196         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3197                     true, false, true),
3198         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3199                     true, false, true),
3200         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3201                     false, false, true),
3202         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3203                     false, false, true),
3204         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3205                     false, false, true),
3206         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3207                     false, false, true),
3208         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3209                     false, false, true),
3210         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3211                     false, false, true),
3212         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3213                     false, false, true),
3214         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3215                     false, false, true),
3216         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3217                     false, false, true),
3218         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3219                     false, false, true),
3220         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3221                     true, false, true),
3222         VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3223                     false, false, true),
3224         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3225                     false, false, true),
3226         VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3227                     false, false, true),
3228         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3229                     false, false, true),
3230
3231         /*
3232          * DX commands
3233          */
3234         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3235                     false, false, true),
3236         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3237                     false, false, true),
3238         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3239                     false, false, true),
3240         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3241                     false, false, true),
3242         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3243                     false, false, true),
3244         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3245                     &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3246         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3247                     &vmw_cmd_dx_set_shader_res, true, false, true),
3248         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3249                     true, false, true),
3250         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3251                     true, false, true),
3252         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3253                     true, false, true),
3254         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3255                     true, false, true),
3256         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3257                     true, false, true),
3258         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3259                     &vmw_cmd_dx_cid_check, true, false, true),
3260         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3261                     true, false, true),
3262         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3263                     &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3264         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3265                     &vmw_cmd_dx_set_index_buffer, true, false, true),
3266         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3267                     &vmw_cmd_dx_set_rendertargets, true, false, true),
3268         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3269                     true, false, true),
3270         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3271                     &vmw_cmd_dx_cid_check, true, false, true),
3272         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3273                     &vmw_cmd_dx_cid_check, true, false, true),
3274         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3275                     true, false, true),
3276         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
3277                     true, false, true),
3278         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3279                     true, false, true),
3280         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3281                     &vmw_cmd_ok, true, false, true),
3282         VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
3283                     true, false, true),
3284         VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
3285                     true, false, true),
3286         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3287                     true, false, true),
3288         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
3289                     true, false, true),
3290         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3291                     true, false, true),
3292         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3293                     true, false, true),
3294         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3295                     &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3296         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3297                     &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3298         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3299                     true, false, true),
3300         VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3301                     true, false, true),
3302         VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3303                     &vmw_cmd_dx_check_subresource, true, false, true),
3304         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3305                     &vmw_cmd_dx_check_subresource, true, false, true),
3306         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3307                     &vmw_cmd_dx_check_subresource, true, false, true),
3308         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3309                     &vmw_cmd_dx_view_define, true, false, true),
3310         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3311                     &vmw_cmd_dx_view_remove, true, false, true),
3312         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3313                     &vmw_cmd_dx_view_define, true, false, true),
3314         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3315                     &vmw_cmd_dx_view_remove, true, false, true),
3316         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3317                     &vmw_cmd_dx_view_define, true, false, true),
3318         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3319                     &vmw_cmd_dx_view_remove, true, false, true),
3320         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3321                     &vmw_cmd_dx_so_define, true, false, true),
3322         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3323                     &vmw_cmd_dx_cid_check, true, false, true),
3324         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3325                     &vmw_cmd_dx_so_define, true, false, true),
3326         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3327                     &vmw_cmd_dx_cid_check, true, false, true),
3328         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3329                     &vmw_cmd_dx_so_define, true, false, true),
3330         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3331                     &vmw_cmd_dx_cid_check, true, false, true),
3332         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3333                     &vmw_cmd_dx_so_define, true, false, true),
3334         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3335                     &vmw_cmd_dx_cid_check, true, false, true),
3336         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3337                     &vmw_cmd_dx_so_define, true, false, true),
3338         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3339                     &vmw_cmd_dx_cid_check, true, false, true),
3340         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3341                     &vmw_cmd_dx_define_shader, true, false, true),
3342         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3343                     &vmw_cmd_dx_destroy_shader, true, false, true),
3344         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3345                     &vmw_cmd_dx_bind_shader, true, false, true),
3346         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3347                     &vmw_cmd_dx_so_define, true, false, true),
3348         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3349                     &vmw_cmd_dx_cid_check, true, false, true),
3350         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3351                     true, false, true),
3352         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3353                     &vmw_cmd_dx_set_so_targets, true, false, true),
3354         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3355                     &vmw_cmd_dx_cid_check, true, false, true),
3356         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3357                     &vmw_cmd_dx_cid_check, true, false, true),
3358         VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3359                     &vmw_cmd_buffer_copy_check, true, false, true),
3360         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3361                     &vmw_cmd_pred_copy_check, true, false, true),
3362 };
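
/*
 * How to read the table above (a sketch, inferred from the checks in
 * vmw_cmd_check() below): the three booleans in each VMW_CMD_DEF()
 * entry are, in order, user_allow, gb_disable and gb_enable:
 *
 *   VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *               true,    user_allow: non-privileged submission OK
 *               false,   gb_disable: not rejected on GB-object devices
 *               false),  gb_enable:  does not require GB objects
 *
 * Entries pointing at &vmw_cmd_invalid are rejected unconditionally,
 * typically because the operation is managed by the kernel itself.
 */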
3363
3364 static int vmw_cmd_check(struct vmw_private *dev_priv,
3365                          struct vmw_sw_context *sw_context,
3366                          void *buf, uint32_t *size)
3367 {
3368         uint32_t cmd_id;
3369         uint32_t size_remaining = *size;
3370         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3371         int ret;
3372         const struct vmw_cmd_entry *entry;
3373         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3374
3375         cmd_id = ((uint32_t *)buf)[0];
3376         /* Handle any non-3D commands. */
3377         if (unlikely(cmd_id < SVGA_CMD_MAX))
3378                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3379
3380
3381         cmd_id = header->id;
3382         *size = header->size + sizeof(SVGA3dCmdHeader);
3383
3384         cmd_id -= SVGA_3D_CMD_BASE;
3385         if (unlikely(*size > size_remaining))
3386                 goto out_invalid;
3387
3388         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3389                 goto out_invalid;
3390
3391         entry = &vmw_cmd_entries[cmd_id];
3392         if (unlikely(!entry->func))
3393                 goto out_invalid;
3394
3395         if (unlikely(!entry->user_allow && !sw_context->kernel))
3396                 goto out_privileged;
3397
3398         if (unlikely(entry->gb_disable && gb))
3399                 goto out_old;
3400
3401         if (unlikely(entry->gb_enable && !gb))
3402                 goto out_new;
3403
3404         ret = entry->func(dev_priv, sw_context, header);
3405         if (unlikely(ret != 0))
3406                 goto out_invalid;
3407
3408         return 0;
3409 out_invalid:
3410         DRM_ERROR("Invalid SVGA3D command: %d\n",
3411                   cmd_id + SVGA_3D_CMD_BASE);
3412         return -EINVAL;
3413 out_privileged:
3414         DRM_ERROR("Privileged SVGA3D command: %d\n",
3415                   cmd_id + SVGA_3D_CMD_BASE);
3416         return -EPERM;
3417 out_old:
3418         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3419                   cmd_id + SVGA_3D_CMD_BASE);
3420         return -EINVAL;
3421 out_new:
3422         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3423                   cmd_id + SVGA_3D_CMD_BASE);
3424         return -EINVAL;
3425 }
3426
3427 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3428                              struct vmw_sw_context *sw_context,
3429                              void *buf,
3430                              uint32_t size)
3431 {
3432         int32_t cur_size = size;
3433         int ret;
3434
3435         sw_context->buf_start = buf;
3436
3437         while (cur_size > 0) {
3438                 size = cur_size;
3439                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3440                 if (unlikely(ret != 0))
3441                         return ret;
3442                 buf = (void *)((unsigned long) buf + size);
3443                 cur_size -= size;
3444         }
3445
3446         if (unlikely(cur_size != 0)) {
3447                 DRM_ERROR("Command verifier out of sync.\n");
3448                 return -EINVAL;
3449         }
3450
3451         return 0;
3452 }
3453
3454 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3455 {
3456         sw_context->cur_reloc = 0;
3457 }
3458
3459 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3460 {
3461         uint32_t i;
3462         struct vmw_relocation *reloc;
3463         struct ttm_validate_buffer *validate;
3464         struct ttm_buffer_object *bo;
3465
3466         for (i = 0; i < sw_context->cur_reloc; ++i) {
3467                 reloc = &sw_context->relocs[i];
3468                 validate = &sw_context->val_bufs[reloc->index].base;
3469                 bo = validate->bo;
3470                 switch (bo->mem.mem_type) {
3471                 case TTM_PL_VRAM:
3472                         reloc->location->offset += bo->offset;
3473                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3474                         break;
3475                 case VMW_PL_GMR:
3476                         reloc->location->gmrId = bo->mem.start;
3477                         break;
3478                 case VMW_PL_MOB:
3479                         *reloc->mob_loc = bo->mem.start;
3480                         break;
3481                 default:
3482                         BUG();
3483                 }
3484         }
3485         vmw_free_relocations(sw_context);
3486 }
3487
3488 /**
3489  * vmw_resource_list_unreference - Free up a resource list and unreference
3490  * all resources referenced by it.
3491  *
 * @sw_context: The software context owning any staged bindings on the list.
3492  * @list: The resource list.
3493  */
3494 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3495                                           struct list_head *list)
3496 {
3497         struct vmw_resource_val_node *val, *val_next;
3498
3499         /*
3500          * Drop references to resources held during command submission.
3501          */
3502
3503         list_for_each_entry_safe(val, val_next, list, head) {
3504                 list_del_init(&val->head);
3505                 vmw_resource_unreference(&val->res);
3506
3507                 if (val->staged_bindings) {
3508                         if (val->staged_bindings != sw_context->staged_bindings)
3509                                 vmw_binding_state_free(val->staged_bindings);
3510                         else
3511                                 sw_context->staged_bindings_inuse = false;
3512                         val->staged_bindings = NULL;
3513                 }
3514
3515                 kfree(val);
3516         }
3517 }
3518
3519 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3520 {
3521         struct vmw_validate_buffer *entry, *next;
3522         struct vmw_resource_val_node *val;
3523
3524         /*
3525          * Drop references to DMA buffers held during command submission.
3526          */
3527         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3528                                  base.head) {
3529                 list_del(&entry->base.head);
3530                 ttm_bo_unref(&entry->base.bo);
3531                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3532                 sw_context->cur_val_buf--;
3533         }
3534         BUG_ON(sw_context->cur_val_buf != 0);
3535
3536         list_for_each_entry(val, &sw_context->resource_list, head)
3537                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3538 }
3539
3540 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3541                                struct ttm_buffer_object *bo,
3542                                bool interruptible,
3543                                bool validate_as_mob)
3544 {
3545         struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3546                                                   base);
3547         int ret;
3548
3549         if (vbo->pin_count > 0)
3550                 return 0;
3551
3552         if (validate_as_mob)
3553                 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3554                                        false);
3555
3556         /*
3557          * Put BO in VRAM if there is space, otherwise as a GMR.
3558          * If there is no space in VRAM and GMR ids are all used up,
3559          * start evicting GMRs to make room. If the DMA buffer can't be
3560          * used as a GMR, this will return -ENOMEM.
3561          */
3562
3563         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3564                               false);
3565         if (likely(ret == 0 || ret == -ERESTARTSYS))
3566                 return ret;
3567
3568         /*
3569          * If that failed, try VRAM again, this time evicting
3570          * previous contents.
3571          */
3572
3573         ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3574         return ret;
3575 }
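
/*
 * Placement fallback order implemented above, in sketch form:
 *
 *   pinned buffer    -> leave placement untouched, return 0
 *   validate_as_mob  -> vmw_mob_placement only
 *   otherwise        -> vmw_vram_gmr_placement first; then, unless the
 *                       failure was -ERESTARTSYS, retry with
 *                       vmw_vram_placement, evicting prior contents
 */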
3576
3577 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3578                                 struct vmw_sw_context *sw_context)
3579 {
3580         struct vmw_validate_buffer *entry;
3581         int ret;
3582
3583         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3584                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3585                                                  true,
3586                                                  entry->validate_as_mob);
3587                 if (unlikely(ret != 0))
3588                         return ret;
3589         }
3590         return 0;
3591 }
3592
3593 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3594                                  uint32_t size)
3595 {
3596         if (likely(sw_context->cmd_bounce_size >= size))
3597                 return 0;
3598
3599         if (sw_context->cmd_bounce_size == 0)
3600                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3601
3602         while (sw_context->cmd_bounce_size < size) {
3603                 sw_context->cmd_bounce_size =
3604                         PAGE_ALIGN(sw_context->cmd_bounce_size +
3605                                    (sw_context->cmd_bounce_size >> 1));
3606         }
3607
3608         if (sw_context->cmd_bounce != NULL)
3609                 vfree(sw_context->cmd_bounce);
3610
3611         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3612
3613         if (sw_context->cmd_bounce == NULL) {
3614                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3615                 sw_context->cmd_bounce_size = 0;
3616                 return -ENOMEM;
3617         }
3618
3619         return 0;
3620 }
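
/*
 * Growth sketch (assuming, for illustration, a 32 KiB initial size and
 * 4 KiB pages): the bounce buffer grows by roughly 1.5x per step,
 * page-aligned, so a 100 KiB batch resizes as
 *
 *   32 KiB -> PAGE_ALIGN(48 KiB)  = 48 KiB
 *          -> PAGE_ALIGN(72 KiB)  = 72 KiB
 *          -> PAGE_ALIGN(108 KiB) = 108 KiB   (>= 100 KiB, done)
 *
 * which amortizes vmalloc() calls across submissions of growing size.
 */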
3621
3622 /**
3623  * vmw_execbuf_fence_commands - create and submit a command stream fence
3624  *
3625  * Creates a fence object and submits a command stream marker.
3626  * If this fails for some reason, we sync the fifo and return NULL.
3627  * It is then safe to fence buffers with a NULL pointer.
3628  *
3629  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3630  * user-space handle is created; otherwise no handle is created.
3631  */
3632
3633 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3634                                struct vmw_private *dev_priv,
3635                                struct vmw_fence_obj **p_fence,
3636                                uint32_t *p_handle)
3637 {
3638         uint32_t sequence;
3639         int ret;
3640         bool synced = false;
3641
3642         /* p_handle implies file_priv. */
3643         BUG_ON(p_handle != NULL && file_priv == NULL);
3644
3645         ret = vmw_fifo_send_fence(dev_priv, &sequence);
3646         if (unlikely(ret != 0)) {
3647                 DRM_ERROR("Fence submission error. Syncing.\n");
3648                 synced = true;
3649         }
3650
3651         if (p_handle != NULL)
3652                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3653                                             sequence, p_fence, p_handle);
3654         else
3655                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3656
3657         if (unlikely(ret != 0 && !synced)) {
3658                 (void) vmw_fallback_wait(dev_priv, false, false,
3659                                          sequence, false,
3660                                          VMW_FENCE_WAIT_TIMEOUT);
3661                 *p_fence = NULL;
3662         }
3663
3664         return 0;
3665 }
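
/*
 * Caller sketch for the NULL-fence contract documented above (this is
 * essentially what vmw_execbuf_process() does further down):
 *
 *   struct vmw_fence_obj *fence = NULL;
 *
 *   (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *   ttm_eu_fence_buffer_objects(&ticket, &validate_nodes,
 *                               (void *) fence);  <- NULL is safe here
 *   if (fence)
 *           vmw_fence_obj_unreference(&fence);
 */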
3666
3667 /**
3668  * vmw_execbuf_copy_fence_user - copy fence object information to
3669  * user-space.
3670  *
3671  * @dev_priv: Pointer to a vmw_private struct.
3672  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3673  * @ret: Return value from fence object creation.
3674  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3675  * which the information should be copied.
3676  * @fence: Pointer to the fence object.
3677  * @fence_handle: User-space fence handle.
3678  *
3679  * This function copies fence information to user-space. If copying fails,
3680  * the user-space struct drm_vmw_fence_rep::error member should be left
3681  * untouched, and if user-space has preloaded it with -EFAULT, the
3682  * failure can then be detected.
3683  * Also if copying fails, user-space will be unable to signal the fence
3684  * object so we wait for it immediately, and then unreference the
3685  * user-space reference.
3686  */
3687 void
3688 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3689                             struct vmw_fpriv *vmw_fp,
3690                             int ret,
3691                             struct drm_vmw_fence_rep __user *user_fence_rep,
3692                             struct vmw_fence_obj *fence,
3693                             uint32_t fence_handle)
3694 {
3695         struct drm_vmw_fence_rep fence_rep;
3696
3697         if (user_fence_rep == NULL)
3698                 return;
3699
3700         memset(&fence_rep, 0, sizeof(fence_rep));
3701
3702         fence_rep.error = ret;
3703         if (ret == 0) {
3704                 BUG_ON(fence == NULL);
3705
3706                 fence_rep.handle = fence_handle;
3707                 fence_rep.seqno = fence->base.seqno;
3708                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3709                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3710         }
3711
3712         /*
3713          * copy_to_user errors will be detected by user space not
3714          * seeing fence_rep::error filled in. Typically
3715          * user-space would have pre-set that member to -EFAULT.
3716          */
3717         ret = copy_to_user(user_fence_rep, &fence_rep,
3718                            sizeof(fence_rep));
3719
3720         /*
3721          * User-space lost the fence object. We need to sync
3722          * and unreference the handle.
3723          */
3724         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3725                 ttm_ref_object_base_unref(vmw_fp->tfile,
3726                                           fence_handle, TTM_REF_USAGE);
3727                 DRM_ERROR("Fence copy error. Syncing.\n");
3728                 (void) vmw_fence_obj_wait(fence, false, false,
3729                                           VMW_FENCE_WAIT_TIMEOUT);
3730         }
3731 }
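
/*
 * User-space side of the -EFAULT convention described above, as a
 * minimal sketch (illustrative only; real clients go through libdrm
 * and the full struct drm_vmw_execbuf_arg setup):
 *
 *   struct drm_vmw_fence_rep rep;
 *
 *   rep.error = -EFAULT;  <- preload survives a failed copy_to_user()
 *   arg.fence_rep = (unsigned long) &rep;
 *   ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 *   if (rep.error != 0)
 *           ... no usable fence handle was returned ...
 */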
3732
3733 /**
3734  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3735  * the fifo.
3736  *
3737  * @dev_priv: Pointer to a device private structure.
3738  * @kernel_commands: Pointer to the unpatched command batch.
3739  * @command_size: Size of the unpatched command batch.
3740  * @sw_context: Structure holding the relocation lists.
3741  *
3742  * Side effects: If this function returns 0, then the command batch
3743  * pointed to by @kernel_commands will have been modified.
3744  */
3745 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3746                                    void *kernel_commands,
3747                                    u32 command_size,
3748                                    struct vmw_sw_context *sw_context)
3749 {
3750         void *cmd;
3751
3752         if (sw_context->dx_ctx_node)
3753                 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3754                                           sw_context->dx_ctx_node->res->id);
3755         else
3756                 cmd = vmw_fifo_reserve(dev_priv, command_size);
3757         if (!cmd) {
3758                 DRM_ERROR("Failed reserving fifo space for commands.\n");
3759                 return -ENOMEM;
3760         }
3761
3762         vmw_apply_relocations(sw_context);
3763         memcpy(cmd, kernel_commands, command_size);
3764         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3765         vmw_resource_relocations_free(&sw_context->res_relocations);
3766         vmw_fifo_commit(dev_priv, command_size);
3767
3768         return 0;
3769 }
3770
3771 /**
3772  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3773  * the command buffer manager.
3774  *
3775  * @dev_priv: Pointer to a device private structure.
3776  * @header: Opaque handle to the command buffer allocation.
3777  * @command_size: Size of the unpatched command batch.
3778  * @sw_context: Structure holding the relocation lists.
3779  *
3780  * Side effects: If this function returns 0, then the command buffer
3781  * represented by @header will have been modified.
3782  */
3783 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3784                                      struct vmw_cmdbuf_header *header,
3785                                      u32 command_size,
3786                                      struct vmw_sw_context *sw_context)
3787 {
3788         u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3789                   SVGA3D_INVALID_ID);
3790         void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3791                                        id, false, header);
3792
3793         vmw_apply_relocations(sw_context);
3794         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3795         vmw_resource_relocations_free(&sw_context->res_relocations);
3796         vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3797
3798         return 0;
3799 }
3800
3801 /**
3802  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3803  * submission using a command buffer.
3804  *
3805  * @dev_priv: Pointer to a device private structure.
3806  * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to the commands, if already resident
 * in kernel memory; may be NULL.
3807  * @command_size: Size of the unpatched command batch.
3808  * @header: Out parameter returning the opaque pointer to the command buffer.
3809  *
3810  * This function checks whether we can use the command buffer manager for
3811  * submission and if so, creates a command buffer of suitable size and
3812  * copies the user data into that buffer.
3813  *
3814  * On successful return, the function returns a pointer to the data in the
3815  * command buffer and *@header is set to non-NULL.
3816  * If command buffers could not be used, the function returns the value
3817  * of @kernel_commands as passed in. That value may be NULL. In that case,
3818  * the value of *@header will be set to NULL.
3819  * If an error is encountered, the function will return a pointer error value.
3820  * If the function is interrupted by a signal while sleeping, it will return
3821  * -ERESTARTSYS cast to a pointer error value.
3822  */
3823 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3824                                 void __user *user_commands,
3825                                 void *kernel_commands,
3826                                 u32 command_size,
3827                                 struct vmw_cmdbuf_header **header)
3828 {
3829         size_t cmdbuf_size;
3830         int ret;
3831
3832         *header = NULL;
3833         if (!dev_priv->cman || kernel_commands)
3834                 return kernel_commands;
3835
3836         if (command_size > SVGA_CB_MAX_SIZE) {
3837                 DRM_ERROR("Command buffer is too large.\n");
3838                 return ERR_PTR(-EINVAL);
3839         }
3840
3841         /* If possible, add a little space for fencing. */
3842         cmdbuf_size = command_size + 512;
3843         cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3844         kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3845                                            true, header);
3846         if (IS_ERR(kernel_commands))
3847                 return kernel_commands;
3848
3849         ret = copy_from_user(kernel_commands, user_commands,
3850                              command_size);
3851         if (ret) {
3852                 DRM_ERROR("Failed copying commands.\n");
3853                 vmw_cmdbuf_header_free(*header);
3854                 *header = NULL;
3855                 return ERR_PTR(-EFAULT);
3856         }
3857
3858         return kernel_commands;
3859 }
3860
3861 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3862                                    struct vmw_sw_context *sw_context,
3863                                    uint32_t handle)
3864 {
3865         struct vmw_resource_val_node *ctx_node;
3866         struct vmw_resource *res;
3867         int ret;
3868
3869         if (handle == SVGA3D_INVALID_ID)
3870                 return 0;
3871
3872         ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3873                                               handle, user_context_converter,
3874                                               &res);
3875         if (unlikely(ret != 0)) {
3876                 DRM_ERROR("Could not find or use DX context 0x%08x.\n",
3877                           (unsigned) handle);
3878                 return ret;
3879         }
3880
3881         ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3882         if (unlikely(ret != 0))
3883                 goto out_err;
3884
3885         sw_context->dx_ctx_node = ctx_node;
3886         sw_context->man = vmw_context_res_man(res);
3887 out_err:
3888         vmw_resource_unreference(&res);
3889         return ret;
3890 }
3891
3892 int vmw_execbuf_process(struct drm_file *file_priv,
3893                         struct vmw_private *dev_priv,
3894                         void __user *user_commands,
3895                         void *kernel_commands,
3896                         uint32_t command_size,
3897                         uint64_t throttle_us,
3898                         uint32_t dx_context_handle,
3899                         struct drm_vmw_fence_rep __user *user_fence_rep,
3900                         struct vmw_fence_obj **out_fence)
3901 {
3902         struct vmw_sw_context *sw_context = &dev_priv->ctx;
3903         struct vmw_fence_obj *fence = NULL;
3904         struct vmw_resource *error_resource;
3905         struct list_head resource_list;
3906         struct vmw_cmdbuf_header *header;
3907         struct ww_acquire_ctx ticket;
3908         uint32_t handle;
3909         int ret;
3910
3911         if (throttle_us) {
3912                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3913                                    throttle_us);
3914
3915                 if (ret)
3916                         return ret;
3917         }
3918
3919         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3920                                              kernel_commands, command_size,
3921                                              &header);
3922         if (IS_ERR(kernel_commands))
3923                 return PTR_ERR(kernel_commands);
3924
3925         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3926         if (ret) {
3927                 ret = -ERESTARTSYS;
3928                 goto out_free_header;
3929         }
3930
3931         sw_context->kernel = false;
3932         if (kernel_commands == NULL) {
3933                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3934                 if (unlikely(ret != 0))
3935                         goto out_unlock;
3936
3937
3938                 ret = copy_from_user(sw_context->cmd_bounce,
3939                                      user_commands, command_size);
3940
3941                 if (unlikely(ret != 0)) {
3942                         ret = -EFAULT;
3943                         DRM_ERROR("Failed copying commands.\n");
3944                         goto out_unlock;
3945                 }
3946                 kernel_commands = sw_context->cmd_bounce;
3947         } else if (!header)
3948                 sw_context->kernel = true;
3949
3950         sw_context->fp = vmw_fpriv(file_priv);
3951         sw_context->cur_reloc = 0;
3952         sw_context->cur_val_buf = 0;
3953         INIT_LIST_HEAD(&sw_context->resource_list);
3954         INIT_LIST_HEAD(&sw_context->ctx_resource_list);
3955         sw_context->cur_query_bo = dev_priv->pinned_bo;
3956         sw_context->last_query_ctx = NULL;
3957         sw_context->needs_post_query_barrier = false;
3958         sw_context->dx_ctx_node = NULL;
3959         sw_context->dx_query_mob = NULL;
3960         sw_context->dx_query_ctx = NULL;
3961         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3962         INIT_LIST_HEAD(&sw_context->validate_nodes);
3963         INIT_LIST_HEAD(&sw_context->res_relocations);
3964         if (sw_context->staged_bindings)
3965                 vmw_binding_state_reset(sw_context->staged_bindings);
3966
3967         if (!sw_context->res_ht_initialized) {
3968                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3969                 if (unlikely(ret != 0))
3970                         goto out_unlock;
3971                 sw_context->res_ht_initialized = true;
3972         }
3973         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3974         INIT_LIST_HEAD(&resource_list);
3975         ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3976         if (unlikely(ret != 0)) {
3977                 list_splice_init(&sw_context->ctx_resource_list,
3978                                  &sw_context->resource_list);
3979                 goto out_err_nores;
3980         }
3981
3982         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3983                                 command_size);
3984         /*
3985          * Merge the resource lists before checking the return status
3986          * from vmw_cmd_check_all so that all the open hash tables will
3987          * be handled properly even if vmw_cmd_check_all fails.
3988          */
3989         list_splice_init(&sw_context->ctx_resource_list,
3990                          &sw_context->resource_list);
3991
3992         if (unlikely(ret != 0))
3993                 goto out_err_nores;
3994
3995         ret = vmw_resources_reserve(sw_context);
3996         if (unlikely(ret != 0))
3997                 goto out_err_nores;
3998
3999         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4000                                      true, NULL);
4001         if (unlikely(ret != 0))
4002                 goto out_err_nores;
4003
4004         ret = vmw_validate_buffers(dev_priv, sw_context);
4005         if (unlikely(ret != 0))
4006                 goto out_err;
4007
4008         ret = vmw_resources_validate(sw_context);
4009         if (unlikely(ret != 0))
4010                 goto out_err;
4011
4012         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4013         if (unlikely(ret != 0)) {
4014                 ret = -ERESTARTSYS;
4015                 goto out_err;
4016         }
4017
4018         if (dev_priv->has_mob) {
4019                 ret = vmw_rebind_contexts(sw_context);
4020                 if (unlikely(ret != 0))
4021                         goto out_unlock_binding;
4022         }
4023
4024         if (!header) {
4025                 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4026                                               command_size, sw_context);
4027         } else {
4028                 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4029                                                 sw_context);
4030                 header = NULL;
4031         }
4032         mutex_unlock(&dev_priv->binding_mutex);
4033         if (ret)
4034                 goto out_err;
4035
4036         vmw_query_bo_switch_commit(dev_priv, sw_context);
4037         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4038                                          &fence,
4039                                          (user_fence_rep) ? &handle : NULL);
4040         /*
4041          * This error is harmless, because if fence submission fails,
4042          * vmw_fifo_send_fence will sync. The error will be propagated to
4043          * user-space in @user_fence_rep.
4044          */
4045
4046         if (ret != 0)
4047                 DRM_ERROR("Fence submission error. Syncing.\n");
4048
4049         vmw_resources_unreserve(sw_context, false);
4050
4051         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4052                                     (void *) fence);
4053
4054         if (unlikely(dev_priv->pinned_bo != NULL &&
4055                      !dev_priv->query_cid_valid))
4056                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4057
4058         vmw_clear_validations(sw_context);
4059         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4060                                     user_fence_rep, fence, handle);
4061
4062         /* Don't unreference when handing fence out */
4063         if (unlikely(out_fence != NULL)) {
4064                 *out_fence = fence;
4065                 fence = NULL;
4066         } else if (likely(fence != NULL)) {
4067                 vmw_fence_obj_unreference(&fence);
4068         }
4069
4070         list_splice_init(&sw_context->resource_list, &resource_list);
4071         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4072         mutex_unlock(&dev_priv->cmdbuf_mutex);
4073
4074         /*
4075          * Unreference resources outside of the cmdbuf_mutex to
4076          * avoid deadlocks in resource destruction paths.
4077          */
4078         vmw_resource_list_unreference(sw_context, &resource_list);
4079
4080         return 0;
4081
4082 out_unlock_binding:
4083         mutex_unlock(&dev_priv->binding_mutex);
4084 out_err:
4085         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4086 out_err_nores:
4087         vmw_resources_unreserve(sw_context, true);
4088         vmw_resource_relocations_free(&sw_context->res_relocations);
4089         vmw_free_relocations(sw_context);
4090         vmw_clear_validations(sw_context);
4091         if (unlikely(dev_priv->pinned_bo != NULL &&
4092                      !dev_priv->query_cid_valid))
4093                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4094 out_unlock:
4095         list_splice_init(&sw_context->resource_list, &resource_list);
4096         error_resource = sw_context->error_resource;
4097         sw_context->error_resource = NULL;
4098         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4099         mutex_unlock(&dev_priv->cmdbuf_mutex);
4100
4101         /*
4102          * Unreference resources outside of the cmdbuf_mutex to
4103          * avoid deadlocks in resource destruction paths.
4104          */
4105         vmw_resource_list_unreference(sw_context, &resource_list);
4106         if (unlikely(error_resource != NULL))
4107                 vmw_resource_unreference(&error_resource);
4108 out_free_header:
4109         if (header)
4110                 vmw_cmdbuf_header_free(header);
4111
4112         return ret;
4113 }
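
/*
 * Illustrative sketch (not driver code) of the unlock-before-unreference
 * pattern used in both the success and error paths above. Dropping the
 * last reference to a resource can enter destruction paths that may
 * themselves need cmdbuf_mutex, so the final unreference must happen
 * only after the mutex is released:
 *
 *	list_splice_init(&sw_context->resource_list, &resource_list);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 *	vmw_resource_list_unreference(sw_context, &resource_list);
 */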
4114
4115 /**
4116  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4117  *
4118  * @dev_priv: The device private structure.
4119  *
4120  * This function is called to idle the fifo and unpin the query buffer
4121  * if the normal way to do this hits an error, which should typically be
4122  * extremely rare.
4123  */
4124 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4125 {
4126         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4127
4128         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4129         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4130         if (dev_priv->dummy_query_bo_pinned) {
4131                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4132                 dev_priv->dummy_query_bo_pinned = false;
4133         }
4134 }
4135
4136
4137 /**
4138  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4139  * query bo.
4140  *
4141  * @dev_priv: The device private structure.
4142  * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
4143  * _after_ a query barrier that flushes all queries touching the current
4144  * buffer pointed to by @dev_priv->pinned_bo.
4145  *
4146  * This function should be used to unpin the pinned query bo, or
4147  * as a query barrier when we need to make sure that all queries have
4148  * finished before the next fifo command. (For example on hardware
4149  * context destruction, where the hardware may otherwise leak unfinished
4150  * queries.)
4151  *
4152  * This function does not return any failure codes, but makes attempts
4153  * to do safe unpinning in case of errors.
4154  *
4155  * The function will synchronize on the previous query barrier, and will
4156  * thus not finish until that barrier has executed.
4157  *
4158  * The @dev_priv->cmdbuf_mutex must be held by the current thread
4159  * before calling this function.
4160  */
4161 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4162                                      struct vmw_fence_obj *fence)
4163 {
4164         int ret = 0;
4165         struct list_head validate_list;
4166         struct ttm_validate_buffer pinned_val, query_val;
4167         struct vmw_fence_obj *lfence = NULL;
4168         struct ww_acquire_ctx ticket;
4169
4170         if (dev_priv->pinned_bo == NULL)
4171                 goto out_unlock;
4172
4173         INIT_LIST_HEAD(&validate_list);
4174
4175         pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4176         pinned_val.shared = false;
4177         list_add_tail(&pinned_val.head, &validate_list);
4178
4179         query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4180         query_val.shared = false;
4181         list_add_tail(&query_val.head, &validate_list);
4182
4183         ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4184                                      false, NULL);
4185         if (unlikely(ret != 0)) {
4186                 vmw_execbuf_unpin_panic(dev_priv);
4187                 goto out_no_reserve;
4188         }
4189
4190         if (dev_priv->query_cid_valid) {
4191                 BUG_ON(fence != NULL);
4192                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4193                 if (unlikely(ret != 0)) {
4194                         vmw_execbuf_unpin_panic(dev_priv);
4195                         goto out_no_emit;
4196                 }
4197                 dev_priv->query_cid_valid = false;
4198         }
4199
4200         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4201         if (dev_priv->dummy_query_bo_pinned) {
4202                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4203                 dev_priv->dummy_query_bo_pinned = false;
4204         }
4205         if (fence == NULL) {
4206                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4207                                                   NULL);
4208                 fence = lfence;
4209         }
4210         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4211         if (lfence != NULL)
4212                 vmw_fence_obj_unreference(&lfence);
4213
4214         ttm_bo_unref(&query_val.bo);
4215         ttm_bo_unref(&pinned_val.bo);
4216         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4217         DRM_INFO("Dummy query bo pin count: %d\n",
4218                  dev_priv->dummy_query_bo->pin_count);
4219
4220 out_unlock:
4221         return;
4222
4223 out_no_emit:
4224         ttm_eu_backoff_reservation(&ticket, &validate_list);
4225 out_no_reserve:
4226         ttm_bo_unref(&query_val.bo);
4227         ttm_bo_unref(&pinned_val.bo);
4228         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4229 }
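
/*
 * Usage sketch (illustrative): cmdbuf_mutex must be held across the
 * call, so a caller that does not already hold it takes it first:
 *
 *	mutex_lock(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 *
 * With @fence == NULL the function emits its own dummy query (when
 * @dev_priv->query_cid_valid) and fences the buffers with a newly
 * created fence; a non-NULL @fence must have been issued after a
 * query barrier, as documented above.
 */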
4230
4231 /**
4232  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4233  * query bo.
4234  *
4235  * @dev_priv: The device private structure.
4236  *
4237  * This function should be used to unpin the pinned query bo, or
4238  * as a query barrier when we need to make sure that all queries have
4239  * finished before the next fifo command. (For example on hardware
4240  * context destruction, where the hardware may otherwise leak unfinished
4241  * queries.)
4242  *
4243  * This function does not return any failure codes, but makes attempts
4244  * to do safe unpinning in case of errors.
4245  *
4246  * The function will synchronize on the previous query barrier, and will
4247  * thus not finish until that barrier has executed.
4248  */
4249 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4250 {
4251         mutex_lock(&dev_priv->cmdbuf_mutex);
4252         if (dev_priv->query_cid_valid)
4253                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4254         mutex_unlock(&dev_priv->cmdbuf_mutex);
4255 }
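
/*
 * Design note (illustrative): this wrapper checks @query_cid_valid only
 * after taking cmdbuf_mutex, so the barrier and unpin are skipped if
 * another thread has already released the pinned bo. A typical external
 * caller, not holding cmdbuf_mutex, therefore just does:
 *
 *	vmw_execbuf_release_pinned_bo(dev_priv);
 */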
4256
4257 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4258                       struct drm_file *file_priv, size_t size)
4259 {
4260         struct vmw_private *dev_priv = vmw_priv(dev);
4261         struct drm_vmw_execbuf_arg arg;
4262         int ret;
4263         static const size_t copy_offset[] = {
4264                 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4265                 sizeof(struct drm_vmw_execbuf_arg)};
4266
4267         if (unlikely(size < copy_offset[0])) {
4268                 DRM_ERROR("Invalid command size, ioctl %d\n",
4269                           DRM_VMW_EXECBUF);
4270                 return -EINVAL;
4271         }
4272
4273         if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4274                 return -EFAULT;
4275
4276         /*
4277          * The ioctl argument has been extended over time while
4278          * maintaining backwards compatibility:
4279          * we take different code paths depending on the value of
4280          * arg.version.
4281          */
4282
4283         if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4284                      arg.version == 0)) {
4285                 DRM_ERROR("Incorrect execbuf version.\n");
4286                 return -EINVAL;
4287         }
4288
4289         if (arg.version > 1 &&
4290             copy_from_user(&arg.context_handle,
4291                            (void __user *) (data + copy_offset[0]),
4292                            copy_offset[arg.version - 1] -
4293                            copy_offset[0]) != 0)
4294                 return -EFAULT;
4295
4296         switch (arg.version) {
4297         case 1:
4298                 arg.context_handle = (uint32_t) -1;
4299                 break;
4300         case 2:
4301                 if (arg.pad64 != 0) {
4302                         DRM_ERROR("Unused IOCTL data not set to zero.\n");
4303                         return -EINVAL;
4304                 }
4305                 break;
4306         default:
4307                 break;
4308         }
4309
4310         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4311         if (unlikely(ret != 0))
4312                 return ret;
4313
4314         ret = vmw_execbuf_process(file_priv, dev_priv,
4315                                   (void __user *)(unsigned long)arg.commands,
4316                                   NULL, arg.command_size, arg.throttle_us,
4317                                   arg.context_handle,
4318                                   (void __user *)(unsigned long)arg.fence_rep,
4319                                   NULL);
4320         ttm_read_unlock(&dev_priv->reservation_sem);
4321         if (unlikely(ret != 0))
4322                 return ret;
4323
4324         vmw_kms_cursor_post_execbuf(dev_priv);
4325
4326         return 0;
4327 }
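
/*
 * Illustrative user-space sketch (not part of this file; cmd_buf,
 * cmd_size and ctx_handle are hypothetical caller variables): a
 * version 2 caller zeroes the whole struct, so arg.pad64 is 0 as
 * checked above, while an old version 1 binary passes only the
 * original struct prefix, which is why just copy_offset[0] bytes are
 * copied before arg.version is known:
 *
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.version = 2;
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_size;
 *	arg.context_handle = ctx_handle;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */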