/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        bool first_usage;
        bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}

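/*
 * Illustrative sketch of how VMW_CMD_DEF is used (the actual, much larger
 * command table appears further down in this file; the exact flags below
 * are an example only): the designated initializer indexes each entry by
 * the command id relative to SVGA_3D_CMD_BASE, so verifier dispatch is a
 * plain array lookup.
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *			    true, false, false),
 *	};
 */
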
/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
                                        bool backoff)
{
        struct vmw_resource_val_node *val;

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *new_backup =
                        backoff ? NULL : val->new_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_context_binding_state_transfer
                                        (val->res, val->staged_bindings);
                        }
                        kfree(val->staged_bindings);
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, new_backup,
                        val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        list_add_tail(&node->head, &sw_context->resource_list);
        node->res = vmw_resource_reference(res);
        node->first_usage = true;

        if (unlikely(p_node != NULL))
                *p_node = node;

        return 0;
}
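
/*
 * Note on the semantics above (illustrative only): adding the same resource
 * twice yields the same validation node, with first_usage cleared on the
 * repeat lookup, e.g.:
 *
 *	vmw_resource_val_add(sw_context, res, &n1); // n1->first_usage == true
 *	vmw_resource_val_add(sw_context, res, &n2); // n2 == n1, first_usage
 *						    // is now false
 */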

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state re-emission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_binding *entry;
        int ret = 0;
        struct vmw_resource *res;

        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                res = vmw_resource_reference_unless_doomed(entry->bi.res);
                if (unlikely(res == NULL))
                        continue;

                ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        list_for_each_entry(rel, list, head) {
                if (likely(rel->res != NULL))
                        cb[rel->offset] = rel->res->id;
                else
                        cb[rel->offset] = SVGA_3D_CMD_NOP;
        }
}
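
/*
 * Sketch of the relocation life cycle (illustrative, not additional driver
 * code): while commands are parsed, the 32-bit word offset of each resource
 * id is recorded; once all resources have been validated and carry valid
 * device ids, the recorded words are patched in place:
 *
 *	ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *					  id_loc - sw_context->buf_start);
 *	...
 *	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
 *	vmw_resource_relocations_free(&sw_context->res_relocations);
 */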

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validation node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) bo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}
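
/*
 * Illustrative caller pattern (a sketch; compare vmw_translate_guest_ptr()
 * below): the returned validation node index is typically stored in a
 * relocation so the buffer can be found again after validation:
 *
 *	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
 */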

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission is currently protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct ttm_buffer_object *bo = &res->backup->base;

                        ret = vmw_bo_to_validate_list
                                (sw_context, bo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }
        return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 enum vmw_res_type res_type,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          id_loc - sw_context->buf_start);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (res_type == vmw_res_context && dev_priv->has_mob &&
            node->first_usage) {

                /*
                 * Put contexts first on the list to be able to exit
                 * list traversal for contexts early.
                 */
                list_del(&node->head);
                list_add(&node->head, &sw_context->resource_list);

                ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
                if (unlikely(ret != 0))
                        return ret;
                node->staged_bindings =
                        kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
                if (node->staged_bindings == NULL) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&node->staged_bindings->list);
        }

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         id_loc - sw_context->buf_start);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 * Since contexts are placed first on the software context's resource list,
 * the traversal below can stop at the first entry without staged bindings.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_context_rebind_all(val->res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo bi;

                bi.ctx = ctx_node->res;
                bi.res = res_node ? res_node->res : NULL;
                bi.bt = vmw_ctx_binding_rt;
                bi.i1.rt_type = cmd->body.type;
                return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;

        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and if so,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin(dev_priv->pinned_bo, false);
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        vmw_bo_pin(dev_priv->dummy_query_bo, true);
                        dev_priv->dummy_query_bo_pinned = true;

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                ttm_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}
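
/*
 * Typical caller pattern (a sketch; see vmw_cmd_end_gb_query() below for a
 * real instance): translate the handle, use the buffer, then drop the
 * reference taken on behalf of the caller:
 *
 *	struct vmw_dma_buffer *vmw_bo;
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
 *				    &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	vmw_dmabuf_unreference(&vmw_bo);
 */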

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        struct vmw_begin_gb_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginGBQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_gb_query_cmd,
                           header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        struct vmw_begin_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);

        if (unlikely(dev_priv->has_mob)) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdBeginGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}
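
/*
 * Note (explanatory, not additional driver code): the pattern above, also
 * used by vmw_cmd_end_query() and vmw_cmd_wait_query() below, promotes a
 * legacy query command to its guest-backed equivalent in place. This is
 * only safe because the legacy and GB command structs are the same size,
 * which the BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)) verifies before the
 * memcpy() overwrites the command in the stream.
 */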

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdEndGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdWaitForGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;
        SVGA3dCmdSurfaceDMASuffix *suffix;
        uint32_t bo_size;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
                                               header->size - sizeof(*suffix));

        /* Make sure the device and verifier stay in sync. */
        if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
                DRM_ERROR("Invalid DMA suffix size.\n");
                return -EINVAL;
        }

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        /* Make sure DMA doesn't cross BO boundaries. */
        bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
        if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
                DRM_ERROR("Invalid DMA offset.\n");
                ret = -EINVAL;
                goto out_no_surface;
        }

        bo_size -= cmd->dma.guest.ptr.offset;
        if (unlikely(suffix->maximumOffset > bo_size))
                suffix->maximumOffset = bo_size;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->dma.host.sid,
                                NULL);
        if (unlikely(ret != 0)) {
                if (unlikely(ret != -ERESTARTSYS))
                        DRM_ERROR("could not find surface for DMA.\n");
                goto out_no_surface;
        }

        srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

        vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
                             header);

out_no_surface:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &decl->array.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &range->indexArray.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        } *cmd;

        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
          ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_tex_state_cmd,
                           header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->state.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &cur_state->value, &res_node);
                if (unlikely(ret != 0))
                        return ret;

                if (dev_priv->has_mob) {
                        struct vmw_ctx_bindinfo bi;

                        bi.ctx = ctx_node->res;
                        bi.res = res_node ? res_node->res : NULL;
                        bi.bt = vmw_ctx_binding_tex;
                        bi.i1.texture_stage = cur_state->stage;
                        vmw_context_binding_add(ctx_node->staged_bindings,
                                                &bi);
                }
        }

        return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
{
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd = buf;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 enum vmw_res_type res_type,
                                 const struct vmw_user_resource_conv
                                 *converter,
                                 uint32_t *res_id,
                                 uint32_t *buf_id,
                                 unsigned long backup_offset)
{
        int ret;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_resource_val_node *val_node;

        ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
                                converter, res_id, &val_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
        if (unlikely(ret != 0))
                return ret;

        if (val_node->first_usage)
                val_node->no_buffer_needed = true;

        vmw_dmabuf_unreference(&val_node->new_backup);
        val_node->new_backup = dma_buf;
        val_node->new_backup_offset = backup_offset;

        return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_bind_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
                                     user_surface_converter,
                                     &cmd->body.sid, &cmd->body.mobid,
                                     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
1486  * @header: Pointer to the command header in the command stream.
1487  */
1488 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1489                                    struct vmw_sw_context *sw_context,
1490                                    SVGA3dCmdHeader *header)
1491 {
1492         struct vmw_gb_surface_cmd {
1493                 SVGA3dCmdHeader header;
1494                 SVGA3dCmdUpdateGBImage body;
1495         } *cmd;
1496
1497         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1498
1499         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1500                                  user_surface_converter,
1501                                  &cmd->body.image.sid, NULL);
1502 }
1503
1504 /**
1505  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1506  * command
1507  *
1508  * @dev_priv: Pointer to a device private struct.
1509  * @sw_context: The software context being used for this batch.
1510  * @header: Pointer to the command header in the command stream.
1511  */
1512 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1513                                      struct vmw_sw_context *sw_context,
1514                                      SVGA3dCmdHeader *header)
1515 {
1516         struct vmw_gb_surface_cmd {
1517                 SVGA3dCmdHeader header;
1518                 SVGA3dCmdUpdateGBSurface body;
1519         } *cmd;
1520
1521         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1522
1523         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1524                                  user_surface_converter,
1525                                  &cmd->body.sid, NULL);
1526 }
1527
1528 /**
1529  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1530  * command
1531  *
1532  * @dev_priv: Pointer to a device private struct.
1533  * @sw_context: The software context being used for this batch.
1534  * @header: Pointer to the command header in the command stream.
1535  */
1536 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1537                                      struct vmw_sw_context *sw_context,
1538                                      SVGA3dCmdHeader *header)
1539 {
1540         struct vmw_gb_surface_cmd {
1541                 SVGA3dCmdHeader header;
1542                 SVGA3dCmdReadbackGBImage body;
1543         } *cmd;
1544
1545         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1546
1547         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1548                                  user_surface_converter,
1549                                  &cmd->body.image.sid, NULL);
1550 }
1551
1552 /**
1553  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1554  * command
1555  *
1556  * @dev_priv: Pointer to a device private struct.
1557  * @sw_context: The software context being used for this batch.
1558  * @header: Pointer to the command header in the command stream.
1559  */
1560 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1561                                        struct vmw_sw_context *sw_context,
1562                                        SVGA3dCmdHeader *header)
1563 {
1564         struct vmw_gb_surface_cmd {
1565                 SVGA3dCmdHeader header;
1566                 SVGA3dCmdReadbackGBSurface body;
1567         } *cmd;
1568
1569         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1570
1571         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1572                                  user_surface_converter,
1573                                  &cmd->body.sid, NULL);
1574 }
1575
1576 /**
1577  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1578  * command
1579  *
1580  * @dev_priv: Pointer to a device private struct.
1581  * @sw_context: The software context being used for this batch.
1582  * @header: Pointer to the command header in the command stream.
1583  */
1584 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1585                                        struct vmw_sw_context *sw_context,
1586                                        SVGA3dCmdHeader *header)
1587 {
1588         struct vmw_gb_surface_cmd {
1589                 SVGA3dCmdHeader header;
1590                 SVGA3dCmdInvalidateGBImage body;
1591         } *cmd;
1592
1593         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1594
1595         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1596                                  user_surface_converter,
1597                                  &cmd->body.image.sid, NULL);
1598 }
1599
1600 /**
1601  * vmw_cmd_invalidate_gb_surface - Validate an
1602  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1603  *
1604  * @dev_priv: Pointer to a device private struct.
1605  * @sw_context: The software context being used for this batch.
1606  * @header: Pointer to the command header in the command stream.
1607  */
1608 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1609                                          struct vmw_sw_context *sw_context,
1610                                          SVGA3dCmdHeader *header)
1611 {
1612         struct vmw_gb_surface_cmd {
1613                 SVGA3dCmdHeader header;
1614                 SVGA3dCmdInvalidateGBSurface body;
1615         } *cmd;
1616
1617         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1618
1619         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1620                                  user_surface_converter,
1621                                  &cmd->body.sid, NULL);
1622 }
1623
1625 /**
1626  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1627  * command
1628  *
1629  * @dev_priv: Pointer to a device private struct.
1630  * @sw_context: The software context being used for this batch.
1631  * @header: Pointer to the command header in the command stream.
1632  */
1633 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1634                                  struct vmw_sw_context *sw_context,
1635                                  SVGA3dCmdHeader *header)
1636 {
1637         struct vmw_shader_define_cmd {
1638                 SVGA3dCmdHeader header;
1639                 SVGA3dCmdDefineShader body;
1640         } *cmd;
1641         int ret;
1642         size_t size;
1643         struct vmw_resource_val_node *val;
1644
1645         cmd = container_of(header, struct vmw_shader_define_cmd,
1646                            header);
1647
1648         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1649                                 user_context_converter, &cmd->body.cid,
1650                                 &val);
1651         if (unlikely(ret != 0))
1652                 return ret;
1653
1654         if (unlikely(!dev_priv->has_mob))
1655                 return 0;
1656
1657         size = cmd->header.size - sizeof(cmd->body);
1658         ret = vmw_compat_shader_add(dev_priv,
1659                                     vmw_context_res_man(val->res),
1660                                     cmd->body.shid, cmd + 1,
1661                                     cmd->body.type, size,
1662                                     &sw_context->staged_cmd_res);
1663         if (unlikely(ret != 0))
1664                 return ret;
1665
1666         return vmw_resource_relocation_add(&sw_context->res_relocations,
1667                                            NULL, &cmd->header.id -
1668                                            sw_context->buf_start);
1671 }
1672
1673 /**
1674  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1675  * command
1676  *
1677  * @dev_priv: Pointer to a device private struct.
1678  * @sw_context: The software context being used for this batch.
1679  * @header: Pointer to the command header in the command stream.
1680  */
1681 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1682                                   struct vmw_sw_context *sw_context,
1683                                   SVGA3dCmdHeader *header)
1684 {
1685         struct vmw_shader_destroy_cmd {
1686                 SVGA3dCmdHeader header;
1687                 SVGA3dCmdDestroyShader body;
1688         } *cmd;
1689         int ret;
1690         struct vmw_resource_val_node *val;
1691
1692         cmd = container_of(header, struct vmw_shader_destroy_cmd,
1693                            header);
1694
1695         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1696                                 user_context_converter, &cmd->body.cid,
1697                                 &val);
1698         if (unlikely(ret != 0))
1699                 return ret;
1700
1701         if (unlikely(!dev_priv->has_mob))
1702                 return 0;
1703
1704         ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1705                                        cmd->body.shid,
1706                                        cmd->body.type,
1707                                        &sw_context->staged_cmd_res);
1708         if (unlikely(ret != 0))
1709                 return ret;
1710
1711         return vmw_resource_relocation_add(&sw_context->res_relocations,
1712                                            NULL, &cmd->header.id -
1713                                            sw_context->buf_start);
1716 }
1717
1718 /**
1719  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1720  * command
1721  *
1722  * @dev_priv: Pointer to a device private struct.
1723  * @sw_context: The software context being used for this batch.
1724  * @header: Pointer to the command header in the command stream.
1725  */
1726 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1727                               struct vmw_sw_context *sw_context,
1728                               SVGA3dCmdHeader *header)
1729 {
1730         struct vmw_set_shader_cmd {
1731                 SVGA3dCmdHeader header;
1732                 SVGA3dCmdSetShader body;
1733         } *cmd;
1734         struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1735         struct vmw_ctx_bindinfo bi;
1736         struct vmw_resource *res = NULL;
1737         int ret;
1738
1739         cmd = container_of(header, struct vmw_set_shader_cmd,
1740                            header);
1741
1742         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1743                                 user_context_converter, &cmd->body.cid,
1744                                 &ctx_node);
1745         if (unlikely(ret != 0))
1746                 return ret;
1747
1748         if (!dev_priv->has_mob)
1749                 return 0;
1750
1751         if (cmd->body.shid != SVGA3D_INVALID_ID) {
1752                 res = vmw_compat_shader_lookup
1753                         (vmw_context_res_man(ctx_node->res),
1754                          cmd->body.shid,
1755                          cmd->body.type);
1756
1757                 if (!IS_ERR(res)) {
1758                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1759                                                     vmw_res_shader,
1760                                                     &cmd->body.shid, res,
1761                                                     &res_node);
1762                         vmw_resource_unreference(&res);
1763                         if (unlikely(ret != 0))
1764                                 return ret;
1765                 }
1766         }
1767
1768         if (!res_node) {
1769                 ret = vmw_cmd_res_check(dev_priv, sw_context,
1770                                         vmw_res_shader,
1771                                         user_shader_converter,
1772                                         &cmd->body.shid, &res_node);
1773                 if (unlikely(ret != 0))
1774                         return ret;
1775         }
1776
1777         bi.ctx = ctx_node->res;
1778         bi.res = res_node ? res_node->res : NULL;
1779         bi.bt = vmw_ctx_binding_shader;
1780         bi.i1.shader_type = cmd->body.type;
1781         return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1782 }
1783
1784 /**
1785  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1786  * command
1787  *
1788  * @dev_priv: Pointer to a device private struct.
1789  * @sw_context: The software context being used for this batch.
1790  * @header: Pointer to the command header in the command stream.
1791  */
1792 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1793                                     struct vmw_sw_context *sw_context,
1794                                     SVGA3dCmdHeader *header)
1795 {
1796         struct vmw_set_shader_const_cmd {
1797                 SVGA3dCmdHeader header;
1798                 SVGA3dCmdSetShaderConst body;
1799         } *cmd;
1800         int ret;
1801
1802         cmd = container_of(header, struct vmw_set_shader_const_cmd,
1803                            header);
1804
1805         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1806                                 user_context_converter, &cmd->body.cid,
1807                                 NULL);
1808         if (unlikely(ret != 0))
1809                 return ret;
1810
1811         if (dev_priv->has_mob)
1812                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1813
1814         return 0;
1815 }
1816
1817 /**
1818  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1819  * command
1820  *
1821  * @dev_priv: Pointer to a device private struct.
1822  * @sw_context: The software context being used for this batch.
1823  * @header: Pointer to the command header in the command stream.
1824  */
1825 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1826                                   struct vmw_sw_context *sw_context,
1827                                   SVGA3dCmdHeader *header)
1828 {
1829         struct vmw_bind_gb_shader_cmd {
1830                 SVGA3dCmdHeader header;
1831                 SVGA3dCmdBindGBShader body;
1832         } *cmd;
1833
1834         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1835                            header);
1836
1837         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1838                                      user_shader_converter,
1839                                      &cmd->body.shid, &cmd->body.mobid,
1840                                      cmd->body.offsetInBytes);
1841 }
1842
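/**
 * vmw_cmd_check_not_3d - Verify a non-3D SVGA fifo command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes left in the command stream. On
 * successful return, the size of the command, so that the caller can
 * advance to the next command.
 *
 * Non-3D commands are kernel-only; they are rejected with -EPERM when
 * submitted from user-space.
 */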
1843 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1844                                 struct vmw_sw_context *sw_context,
1845                                 void *buf, uint32_t *size)
1846 {
1847         uint32_t size_remaining = *size;
1848         uint32_t cmd_id;
1849
1850         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1851         switch (cmd_id) {
1852         case SVGA_CMD_UPDATE:
1853                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1854                 break;
1855         case SVGA_CMD_DEFINE_GMRFB:
1856                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1857                 break;
1858         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1859                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1860                 break;
1861         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1862                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1863                 break;
1864         default:
1865                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1866                 return -EINVAL;
1867         }
1868
1869         if (*size > size_remaining) {
1870                 DRM_ERROR("Invalid SVGA command (size mismatch):"
1871                           " %u.\n", cmd_id);
1872                 return -EINVAL;
1873         }
1874
1875         if (unlikely(!sw_context->kernel)) {
1876                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1877                 return -EPERM;
1878         }
1879
1880         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1881                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1882
1883         return 0;
1884 }
1885
1886 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1887         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1888                     false, false, false),
1889         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1890                     false, false, false),
1891         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1892                     true, false, false),
1893         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1894                     true, false, false),
1895         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1896                     true, false, false),
1897         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1898                     false, false, false),
1899         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1900                     false, false, false),
1901         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1902                     true, false, false),
1903         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1904                     true, false, false),
1905         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1906                     true, false, false),
1907         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1908                     &vmw_cmd_set_render_target_check, true, false, false),
1909         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1910                     true, false, false),
1911         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1912                     true, false, false),
1913         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1914                     true, false, false),
1915         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1916                     true, false, false),
1917         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1918                     true, false, false),
1919         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1920                     true, false, false),
1921         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1922                     true, false, false),
1923         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1924                     false, false, false),
1925         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1926                     true, false, false),
1927         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1928                     true, false, false),
1929         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1930                     true, false, false),
1931         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1932                     true, false, false),
1933         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1934                     true, false, false),
1935         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1936                     true, false, false),
1937         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1938                     true, false, false),
1939         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1940                     true, false, false),
1941         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1942                     true, false, false),
1943         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1944                     true, false, false),
1945         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1946                     &vmw_cmd_blt_surf_screen_check, false, false, false),
1947         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1948                     false, false, false),
1949         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1950                     false, false, false),
1951         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1952                     false, false, false),
1953         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1954                     false, false, false),
1955         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1956                     false, false, false),
1957         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1958                     false, false, false),
1959         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1960                     false, false, false),
1961         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1962                     false, false, false),
1963         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1964                     false, false, false),
1965         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1966                     false, false, false),
1967         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1968                     false, false, false),
1969         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1970                     false, false, false),
1971         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1972                     false, false, false),
1973         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1974                     false, false, true),
1975         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1976                     false, false, true),
1977         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1978                     false, false, true),
1979         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1980                     false, false, true),
1981         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1982                     false, false, true),
1983         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1984                     false, false, true),
1985         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1986                     false, false, true),
1987         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1988                     false, false, true),
1989         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1990                     true, false, true),
1991         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1992                     false, false, true),
1993         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1994                     true, false, true),
1995         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1996                     &vmw_cmd_update_gb_surface, true, false, true),
1997         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1998                     &vmw_cmd_readback_gb_image, true, false, true),
1999         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2000                     &vmw_cmd_readback_gb_surface, true, false, true),
2001         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2002                     &vmw_cmd_invalidate_gb_image, true, false, true),
2003         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2004                     &vmw_cmd_invalidate_gb_surface, true, false, true),
2005         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2006                     false, false, true),
2007         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2008                     false, false, true),
2009         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2010                     false, false, true),
2011         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2012                     false, false, true),
2013         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2014                     false, false, true),
2015         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2016                     false, false, true),
2017         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2018                     true, false, true),
2019         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2020                     false, false, true),
2021         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2022                     false, false, false),
2023         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2024                     true, false, true),
2025         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2026                     true, false, true),
2027         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2028                     true, false, true),
2029         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2030                     true, false, true),
2031         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2032                     false, false, true),
2033         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2034                     false, false, true),
2035         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2036                     false, false, true),
2037         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2038                     false, false, true),
2039         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2040                     false, false, true),
2041         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2042                     false, false, true),
2043         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2044                     false, false, true),
2045         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2046                     false, false, true),
2047         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2048                     false, false, true),
2049         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2050                     false, false, true),
2051         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2052                     true, false, true)
2053 };
2054
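/**
 * vmw_cmd_check - Verify a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes left in the command stream. On
 * successful return, the size of the command.
 *
 * Hands non-3D commands to vmw_cmd_check_not_3d(). For 3D commands,
 * checks the command against the remaining stream size, the caller's
 * privileges and the device's guest-backed capability, and then calls
 * the verifier function from the corresponding vmw_cmd_entries slot.
 */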
2055 static int vmw_cmd_check(struct vmw_private *dev_priv,
2056                          struct vmw_sw_context *sw_context,
2057                          void *buf, uint32_t *size)
2058 {
2059         uint32_t cmd_id;
2060         uint32_t size_remaining = *size;
2061         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2062         int ret;
2063         const struct vmw_cmd_entry *entry;
2064         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2065
2066         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2067         /* Handle any non-3D commands */
2068         if (unlikely(cmd_id < SVGA_CMD_MAX))
2069                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2070
2072         cmd_id = le32_to_cpu(header->id);
2073         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2074
2075         cmd_id -= SVGA_3D_CMD_BASE;
2076         if (unlikely(*size > size_remaining))
2077                 goto out_invalid;
2078
2079         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2080                 goto out_invalid;
2081
2082         entry = &vmw_cmd_entries[cmd_id];
2083         if (unlikely(!entry->func))
2084                 goto out_invalid;
2085
2086         if (unlikely(!entry->user_allow && !sw_context->kernel))
2087                 goto out_privileged;
2088
2089         if (unlikely(entry->gb_disable && gb))
2090                 goto out_old;
2091
2092         if (unlikely(entry->gb_enable && !gb))
2093                 goto out_new;
2094
2095         ret = entry->func(dev_priv, sw_context, header);
2096         if (unlikely(ret != 0))
2097                 goto out_invalid;
2098
2099         return 0;
2100 out_invalid:
2101         DRM_ERROR("Invalid SVGA3D command: %d\n",
2102                   cmd_id + SVGA_3D_CMD_BASE);
2103         return -EINVAL;
2104 out_privileged:
2105         DRM_ERROR("Privileged SVGA3D command: %d\n",
2106                   cmd_id + SVGA_3D_CMD_BASE);
2107         return -EPERM;
2108 out_old:
2109         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2110                   cmd_id + SVGA_3D_CMD_BASE);
2111         return -EINVAL;
2112 out_new:
2113         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2114                   cmd_id + SVGA_3D_CMD_BASE);
2115         return -EINVAL;
2116 }
2117
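/**
 * vmw_cmd_check_all - Verify all commands in a command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Walks the command stream, verifying one command at a time with
 * vmw_cmd_check() until the stream is exhausted.
 */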
2118 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2119                              struct vmw_sw_context *sw_context,
2120                              void *buf,
2121                              uint32_t size)
2122 {
2123         int32_t cur_size = size;
2124         int ret;
2125
2126         sw_context->buf_start = buf;
2127
2128         while (cur_size > 0) {
2129                 size = cur_size;
2130                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2131                 if (unlikely(ret != 0))
2132                         return ret;
2133                 buf = (void *)((unsigned long) buf + size);
2134                 cur_size -= size;
2135         }
2136
2137         if (unlikely(cur_size != 0)) {
2138                 DRM_ERROR("Command verifier out of sync.\n");
2139                 return -EINVAL;
2140         }
2141
2142         return 0;
2143 }
2144
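/**
 * vmw_free_relocations - Discard all buffer relocations
 *
 * @sw_context: The software context being used for this batch.
 *
 * Relocations live in a fixed array in the software context, so freeing
 * them amounts to resetting the relocation count.
 */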
2145 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2146 {
2147         sw_context->cur_reloc = 0;
2148 }
2149
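/**
 * vmw_apply_relocations - Patch buffer locations into the command stream
 *
 * @sw_context: The software context being used for this batch.
 *
 * For each recorded relocation, writes the validated buffer object's
 * final placement - VRAM offset, GMR id or MOB id - into the command
 * stream location that referenced it.
 */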
2150 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2151 {
2152         uint32_t i;
2153         struct vmw_relocation *reloc;
2154         struct ttm_validate_buffer *validate;
2155         struct ttm_buffer_object *bo;
2156
2157         for (i = 0; i < sw_context->cur_reloc; ++i) {
2158                 reloc = &sw_context->relocs[i];
2159                 validate = &sw_context->val_bufs[reloc->index].base;
2160                 bo = validate->bo;
2161                 switch (bo->mem.mem_type) {
2162                 case TTM_PL_VRAM:
2163                         reloc->location->offset += bo->offset;
2164                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2165                         break;
2166                 case VMW_PL_GMR:
2167                         reloc->location->gmrId = bo->mem.start;
2168                         break;
2169                 case VMW_PL_MOB:
2170                         *reloc->mob_loc = bo->mem.start;
2171                         break;
2172                 default:
2173                         BUG();
2174                 }
2175         }
2176         vmw_free_relocations(sw_context);
2177 }
2178
2179 /**
2180  * vmw_resource_list_unreference - Free up a resource list and unreference
2181  * all resources referenced by it.
2182  *
2183  * @list: The resource list.
2184  */
2185 static void vmw_resource_list_unreference(struct list_head *list)
2186 {
2187         struct vmw_resource_val_node *val, *val_next;
2188
2189         /*
2190          * Drop references to resources held during command submission.
2191          */
2192
2193         list_for_each_entry_safe(val, val_next, list, head) {
2194                 list_del_init(&val->head);
2195                 vmw_resource_unreference(&val->res);
2196                 kfree(val->staged_bindings);
2198                 kfree(val);
2199         }
2200 }
2201
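/**
 * vmw_clear_validations - Drop all validation nodes
 *
 * @sw_context: The software context being used for this batch.
 *
 * Unreferences the buffer objects on the validation list and removes
 * the hash table entries of both buffers and resources.
 */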
2202 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2203 {
2204         struct vmw_validate_buffer *entry, *next;
2205         struct vmw_resource_val_node *val;
2206
2207         /*
2208          * Drop references to DMA buffers held during command submission.
2209          */
2210         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2211                                  base.head) {
2212                 list_del(&entry->base.head);
2213                 ttm_bo_unref(&entry->base.bo);
2214                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2215                 sw_context->cur_val_buf--;
2216         }
2217         BUG_ON(sw_context->cur_val_buf != 0);
2218
2219         list_for_each_entry(val, &sw_context->resource_list, head)
2220                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2221 }
2222
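/**
 * vmw_validate_single_buffer - Validate the placement of a buffer object
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @validate_as_mob: Whether to validate the buffer as a MOB.
 */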
2223 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2224                                       struct ttm_buffer_object *bo,
2225                                       bool validate_as_mob)
2226 {
2227         int ret;
2228
2230         /*
2231          * Don't validate pinned buffers.
2232          */
2233
2234         if (bo == dev_priv->pinned_bo ||
2235             (bo == dev_priv->dummy_query_bo &&
2236              dev_priv->dummy_query_bo_pinned))
2237                 return 0;
2238
2239         if (validate_as_mob)
2240                 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2241
2242         /*
2243          * Put BO in VRAM if there is space, otherwise as a GMR.
2244          * If there is no space in VRAM and GMR ids are all used up,
2245          * start evicting GMRs to make room. If the DMA buffer can't be
2246          * used as a GMR, this will return -ENOMEM.
2247          */
2248
2249         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2250         if (likely(ret == 0 || ret == -ERESTARTSYS))
2251                 return ret;
2252
2253         /*
2254          * If that failed, try VRAM again, this time evicting
2255          * previous contents.
2256          */
2257
2258         DRM_INFO("Falling through to VRAM.\n");
2259         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2260         return ret;
2261 }
2262
2263 static int vmw_validate_buffers(struct vmw_private *dev_priv,
2264                                 struct vmw_sw_context *sw_context)
2265 {
2266         struct vmw_validate_buffer *entry;
2267         int ret;
2268
2269         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2270                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2271                                                  entry->validate_as_mob);
2272                 if (unlikely(ret != 0))
2273                         return ret;
2274         }
2275         return 0;
2276 }
2277
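/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is big enough
 *
 * @sw_context: The software context being used for this batch.
 * @size: The required bounce buffer size in bytes.
 *
 * Grows the bounce buffer by roughly 50% per step, page-aligned, until
 * it can hold @size bytes. Any previous contents are discarded.
 */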
2278 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2279                                  uint32_t size)
2280 {
2281         if (likely(sw_context->cmd_bounce_size >= size))
2282                 return 0;
2283
2284         if (sw_context->cmd_bounce_size == 0)
2285                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2286
2287         while (sw_context->cmd_bounce_size < size) {
2288                 sw_context->cmd_bounce_size =
2289                         PAGE_ALIGN(sw_context->cmd_bounce_size +
2290                                    (sw_context->cmd_bounce_size >> 1));
2291         }
2292
2293         vfree(sw_context->cmd_bounce);
2295
2296         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2297
2298         if (sw_context->cmd_bounce == NULL) {
2299                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
2300                 sw_context->cmd_bounce_size = 0;
2301                 return -ENOMEM;
2302         }
2303
2304         return 0;
2305 }
2306
2307 /**
2308  * vmw_execbuf_fence_commands - create and submit a command stream fence
2309  *
2310  * @file_priv: Pointer to the calling file, or NULL for kernel-internal
2311  * fences. Must be non-NULL if @p_handle is non-NULL.
2312  * @dev_priv: Pointer to a device private struct.
2313  * @p_fence: Location in which to return the created fence object.
2314  * @p_handle: If non-NULL, location in which to return a user-space
2315  * handle for the fence; otherwise no handle is created.
2316  *
2317  * Creates a fence object and submits a command stream marker.
2318  * If this fails for some reason, we sync the fifo and set *p_fence to
2319  * NULL. It is then safe to fence buffers with a NULL pointer.
2316  */
2318 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2319                                struct vmw_private *dev_priv,
2320                                struct vmw_fence_obj **p_fence,
2321                                uint32_t *p_handle)
2322 {
2323         uint32_t sequence;
2324         int ret;
2325         bool synced = false;
2326
2327         /* p_handle implies file_priv. */
2328         BUG_ON(p_handle != NULL && file_priv == NULL);
2329
2330         ret = vmw_fifo_send_fence(dev_priv, &sequence);
2331         if (unlikely(ret != 0)) {
2332                 DRM_ERROR("Fence submission error. Syncing.\n");
2333                 synced = true;
2334         }
2335
2336         if (p_handle != NULL)
2337                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2338                                             sequence, p_fence, p_handle);
2339         else
2340                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
2341
2342         if (unlikely(ret != 0 && !synced)) {
2343                 (void) vmw_fallback_wait(dev_priv, false, false,
2344                                          sequence, false,
2345                                          VMW_FENCE_WAIT_TIMEOUT);
2346                 *p_fence = NULL;
2347         }
2348
2349         return ret;
2350 }
2351
2352 /**
2353  * vmw_execbuf_copy_fence_user - copy fence object information to
2354  * user-space.
2355  *
2356  * @dev_priv: Pointer to a vmw_private struct.
2357  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2358  * @ret: Return value from fence object creation.
2359  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2360  * which the information should be copied.
2361  * @fence: Pointer to the fence object.
2362  * @fence_handle: User-space fence handle.
2363  *
2364  * This function copies fence information to user-space. If copying
2365  * fails, the user-space struct drm_vmw_fence_rep::error member should
2366  * be left untouched; since user-space typically preloads it with
2367  * -EFAULT, the error can still be detected. Also, if copying fails,
2368  * user-space will be unable to signal the fence object, so we wait for
2369  * it immediately and then drop the user-space reference.
2371  */
2372 void
2373 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2374                             struct vmw_fpriv *vmw_fp,
2375                             int ret,
2376                             struct drm_vmw_fence_rep __user *user_fence_rep,
2377                             struct vmw_fence_obj *fence,
2378                             uint32_t fence_handle)
2379 {
2380         struct drm_vmw_fence_rep fence_rep;
2381
2382         if (user_fence_rep == NULL)
2383                 return;
2384
2385         memset(&fence_rep, 0, sizeof(fence_rep));
2386
2387         fence_rep.error = ret;
2388         if (ret == 0) {
2389                 BUG_ON(fence == NULL);
2390
2391                 fence_rep.handle = fence_handle;
2392                 fence_rep.seqno = fence->base.seqno;
2393                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2394                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2395         }
2396
2397         /*
2398          * copy_to_user errors will be detected by user space not
2399          * seeing fence_rep::error filled in. Typically
2400          * user-space would have pre-set that member to -EFAULT.
2401          */
2402         ret = copy_to_user(user_fence_rep, &fence_rep,
2403                            sizeof(fence_rep));
2404
2405         /*
2406          * User-space lost the fence object. We need to sync
2407          * and unreference the handle.
2408          */
2409         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2410                 ttm_ref_object_base_unref(vmw_fp->tfile,
2411                                           fence_handle, TTM_REF_USAGE);
2412                 DRM_ERROR("Fence copy error. Syncing.\n");
2413                 (void) vmw_fence_obj_wait(fence, false, false,
2414                                           VMW_FENCE_WAIT_TIMEOUT);
2415         }
2416 }
2417
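/**
 * vmw_execbuf_process - Verify and submit a command stream
 *
 * @file_priv: Pointer to the calling file, or NULL for kernel-internal
 * submissions.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space address of the command stream, or NULL if
 * @kernel_commands is given.
 * @kernel_commands: Kernel address of the command stream, or NULL to have
 * the commands copied from @user_commands into the bounce buffer.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle the submission until the command
 * lag drops below this many microseconds.
 * @user_fence_rep: Optional user-space address of a struct
 * drm_vmw_fence_rep to receive fence information.
 * @out_fence: If non-NULL, the created fence object is returned here
 * rather than unreferenced.
 *
 * Verifies the command stream, reserves and validates all buffers and
 * resources it references, copies the commands to the fifo and fences
 * the submission.
 */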
2420 int vmw_execbuf_process(struct drm_file *file_priv,
2421                         struct vmw_private *dev_priv,
2422                         void __user *user_commands,
2423                         void *kernel_commands,
2424                         uint32_t command_size,
2425                         uint64_t throttle_us,
2426                         struct drm_vmw_fence_rep __user *user_fence_rep,
2427                         struct vmw_fence_obj **out_fence)
2428 {
2429         struct vmw_sw_context *sw_context = &dev_priv->ctx;
2430         struct vmw_fence_obj *fence = NULL;
2431         struct vmw_resource *error_resource;
2432         struct list_head resource_list;
2433         struct ww_acquire_ctx ticket;
2434         uint32_t handle;
2435         void *cmd;
2436         int ret;
2437
2438         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2439         if (unlikely(ret != 0))
2440                 return -ERESTARTSYS;
2441
2442         if (kernel_commands == NULL) {
2443                 sw_context->kernel = false;
2444
2445                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2446                 if (unlikely(ret != 0))
2447                         goto out_unlock;
2448
2449
2451                                      user_commands, command_size);
2452
2453                 if (unlikely(ret != 0)) {
2454                         ret = -EFAULT;
2455                         DRM_ERROR("Failed copying commands.\n");
2456                         goto out_unlock;
2457                 }
2458                 kernel_commands = sw_context->cmd_bounce;
2459         } else {
2460                 sw_context->kernel = true;
2461         }
2461
2462         sw_context->fp = vmw_fpriv(file_priv);
2463         sw_context->cur_reloc = 0;
2464         sw_context->cur_val_buf = 0;
2465         INIT_LIST_HEAD(&sw_context->resource_list);
2466         sw_context->cur_query_bo = dev_priv->pinned_bo;
2467         sw_context->last_query_ctx = NULL;
2468         sw_context->needs_post_query_barrier = false;
2469         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2470         INIT_LIST_HEAD(&sw_context->validate_nodes);
2471         INIT_LIST_HEAD(&sw_context->res_relocations);
2472         if (!sw_context->res_ht_initialized) {
2473                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2474                 if (unlikely(ret != 0))
2475                         goto out_unlock;
2476                 sw_context->res_ht_initialized = true;
2477         }
2478         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2479
2480         INIT_LIST_HEAD(&resource_list);
2481         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2482                                 command_size);
2483         if (unlikely(ret != 0))
2484                 goto out_err_nores;
2485
2486         ret = vmw_resources_reserve(sw_context);
2487         if (unlikely(ret != 0))
2488                 goto out_err_nores;
2489
2490         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
2491         if (unlikely(ret != 0))
2492                 goto out_err;
2493
2494         ret = vmw_validate_buffers(dev_priv, sw_context);
2495         if (unlikely(ret != 0))
2496                 goto out_err;
2497
2498         ret = vmw_resources_validate(sw_context);
2499         if (unlikely(ret != 0))
2500                 goto out_err;
2501
2502         if (throttle_us) {
2503                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2504                                    throttle_us);
2505
2506                 if (unlikely(ret != 0))
2507                         goto out_err;
2508         }
2509
2510         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2511         if (unlikely(ret != 0)) {
2512                 ret = -ERESTARTSYS;
2513                 goto out_err;
2514         }
2515
2516         if (dev_priv->has_mob) {
2517                 ret = vmw_rebind_contexts(sw_context);
2518                 if (unlikely(ret != 0))
2519                         goto out_unlock_binding;
2520         }
2521
2522         cmd = vmw_fifo_reserve(dev_priv, command_size);
2523         if (unlikely(cmd == NULL)) {
2524                 DRM_ERROR("Failed reserving fifo space for commands.\n");
2525                 ret = -ENOMEM;
2526                 goto out_unlock_binding;
2527         }
2528
2529         vmw_apply_relocations(sw_context);
2530         memcpy(cmd, kernel_commands, command_size);
2531
2532         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2533         vmw_resource_relocations_free(&sw_context->res_relocations);
2534
2535         vmw_fifo_commit(dev_priv, command_size);
2536
2537         vmw_query_bo_switch_commit(dev_priv, sw_context);
2538         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2539                                          &fence,
2540                                          (user_fence_rep) ? &handle : NULL);
2541         /*
2542          * This error is harmless, because if fence submission fails,
2543          * vmw_fifo_send_fence will sync. The error will be propagated to
2544          * user-space in @user_fence_rep.
2545          */
2546
2547         if (ret != 0)
2548                 DRM_ERROR("Fence submission error. Syncing.\n");
2549
2550         vmw_resource_list_unreserve(&sw_context->resource_list, false);
2551         mutex_unlock(&dev_priv->binding_mutex);
2552
2553         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2554                                     (void *) fence);
2555
2556         if (unlikely(dev_priv->pinned_bo != NULL &&
2557                      !dev_priv->query_cid_valid))
2558                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2559
2560         vmw_clear_validations(sw_context);
2561         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2562                                     user_fence_rep, fence, handle);
2563
2564         /* Don't unreference when handing fence out */
2565         if (unlikely(out_fence != NULL)) {
2566                 *out_fence = fence;
2567                 fence = NULL;
2568         } else if (likely(fence != NULL)) {
2569                 vmw_fence_obj_unreference(&fence);
2570         }
2571
2572         list_splice_init(&sw_context->resource_list, &resource_list);
2573         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2574         mutex_unlock(&dev_priv->cmdbuf_mutex);
2575
2576         /*
2577          * Unreference resources outside of the cmdbuf_mutex to
2578          * avoid deadlocks in resource destruction paths.
2579          */
2580         vmw_resource_list_unreference(&resource_list);
2581
2582         return 0;
2583
2584 out_unlock_binding:
2585         mutex_unlock(&dev_priv->binding_mutex);
2586 out_err:
2587         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2588 out_err_nores:
2589         vmw_resource_list_unreserve(&sw_context->resource_list, true);
2590         vmw_resource_relocations_free(&sw_context->res_relocations);
2591         vmw_free_relocations(sw_context);
2592         vmw_clear_validations(sw_context);
2593         if (unlikely(dev_priv->pinned_bo != NULL &&
2594                      !dev_priv->query_cid_valid))
2595                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2596 out_unlock:
2597         list_splice_init(&sw_context->resource_list, &resource_list);
2598         error_resource = sw_context->error_resource;
2599         sw_context->error_resource = NULL;
2600         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2601         mutex_unlock(&dev_priv->cmdbuf_mutex);
2602
2603         /*
2604          * Unreference resources outside of the cmdbuf_mutex to
2605          * avoid deadlocks in resource destruction paths.
2606          */
2607         vmw_resource_list_unreference(&resource_list);
2608         if (unlikely(error_resource != NULL))
2609                 vmw_resource_unreference(&error_resource);
2610
2611         return ret;
2612 }
2613
2614 /**
2615  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2616  *
2617  * @dev_priv: The device private structure.
2618  *
2619  * This function is called to idle the fifo and unpin the query buffer
2620  * if the normal way to do this hits an error, which should typically be
2621  * extremely rare.
2622  */
2623 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2624 {
2625         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2626
2627         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2628         vmw_bo_pin(dev_priv->pinned_bo, false);
2629         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2630         dev_priv->dummy_query_bo_pinned = false;
2631 }
2632
2634 /**
2635  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2636  * query bo.
2637  *
2638  * @dev_priv: The device private structure.
2639  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2640  * _after_ a query barrier that flushes all queries touching the current
2641  * buffer pointed to by @dev_priv->pinned_bo
2642  *
2643  * This function should be used to unpin the pinned query bo, or
2644  * as a query barrier when we need to make sure that all queries have
2645  * finished before the next fifo command. (For example on hardware
2646  * context destructions where the hardware may otherwise leak unfinished
2647  * queries).
2648  *
2649  * This function does not return any failure codes, but makes an attempt
2650  * to unpin safely in case of errors.
2651  *
2652  * The function will synchronize on the previous query barrier, and will
2653  * thus not finish until that barrier has executed.
2654  *
2655  * The @dev_priv->cmdbuf_mutex must be held by the current thread
2656  * before calling this function.
2657  */
2658 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2659                                      struct vmw_fence_obj *fence)
2660 {
2661         int ret = 0;
2662         struct list_head validate_list;
2663         struct ttm_validate_buffer pinned_val, query_val;
2664         struct vmw_fence_obj *lfence = NULL;
2665         struct ww_acquire_ctx ticket;
2666
2667         if (dev_priv->pinned_bo == NULL)
2668                 return;
2669
2670         INIT_LIST_HEAD(&validate_list);
2671
2672         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2673         pinned_val.shared = false;
2674         list_add_tail(&pinned_val.head, &validate_list);
2675
2676         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2677         query_val.shared = false;
2678         list_add_tail(&query_val.head, &validate_list);
2679
2680         ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
2681         if (unlikely(ret != 0)) {
2682                 vmw_execbuf_unpin_panic(dev_priv);
2683                 goto out_no_reserve;
2684         }
2685
2686         if (dev_priv->query_cid_valid) {
2687                 BUG_ON(fence != NULL);
2688                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2689                 if (unlikely(ret != 0)) {
2690                         vmw_execbuf_unpin_panic(dev_priv);
2691                         goto out_no_emit;
2692                 }
2693                 dev_priv->query_cid_valid = false;
2694         }
2695
2696         vmw_bo_pin(dev_priv->pinned_bo, false);
2697         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2698         dev_priv->dummy_query_bo_pinned = false;
2699
2700         if (fence == NULL) {
2701                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2702                                                   NULL);
2703                 fence = lfence;
2704         }
2705         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2706         if (lfence != NULL)
2707                 vmw_fence_obj_unreference(&lfence);
2708
2709         ttm_bo_unref(&query_val.bo);
2710         ttm_bo_unref(&pinned_val.bo);
2711         ttm_bo_unref(&dev_priv->pinned_bo);
2712
2713         return;
2715
2716 out_no_emit:
2717         ttm_eu_backoff_reservation(&ticket, &validate_list);
2718 out_no_reserve:
2719         ttm_bo_unref(&query_val.bo);
2720         ttm_bo_unref(&pinned_val.bo);
2721         ttm_bo_unref(&dev_priv->pinned_bo);
2722 }
2723
2724 /**
2725  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2726  * query bo.
2727  *
2728  * @dev_priv: The device private structure.
2729  *
2730  * This function should be used to unpin the pinned query bo, or
2731  * as a query barrier when we need to make sure that all queries have
2732  * finished before the next fifo command. (For example on hardware
2733  * context destructions where the hardware may otherwise leak unfinished
2734  * queries).
2735  *
2736  * This function does not return any failure codes, but makes an attempt
2737  * to unpin safely in case of errors.
2738  *
2739  * The function will synchronize on the previous query barrier, and will
2740  * thus not finish until that barrier has executed.
2741  */
2742 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2743 {
2744         mutex_lock(&dev_priv->cmdbuf_mutex);
2745         if (dev_priv->query_cid_valid)
2746                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2747         mutex_unlock(&dev_priv->cmdbuf_mutex);
2748 }
2749
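/**
 * vmw_execbuf_ioctl - Ioctl handler for user-space command submission
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_execbuf_arg.
 * @file_priv: Pointer to the calling file.
 */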
2751 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2752                       struct drm_file *file_priv)
2753 {
2754         struct vmw_private *dev_priv = vmw_priv(dev);
2755         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2756         int ret;
2757
2758         /*
2759          * This will allow us to extend the ioctl argument while
2760          * maintaining backwards compatibility:
2761          * We take different code paths depending on the value of
2762          * arg->version.
2763          */
2764
2765         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2766                 DRM_ERROR("Incorrect execbuf version.\n");
2767                 DRM_ERROR("You're running outdated experimental "
2768                           "vmwgfx user-space drivers.\n");
2769                 return -EINVAL;
2770         }
2771
2772         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2773         if (unlikely(ret != 0))
2774                 return ret;
2775
2776         ret = vmw_execbuf_process(file_priv, dev_priv,
2777                                   (void __user *)(unsigned long)arg->commands,
2778                                   NULL, arg->command_size, arg->throttle_us,
2779                                   (void __user *)(unsigned long)arg->fence_rep,
2780                                   NULL);
2781
2782         if (unlikely(ret != 0))
2783                 goto out_unlock;
2784
2785         vmw_kms_cursor_post_execbuf(dev_priv);
2786
2787 out_unlock:
2788         ttm_read_unlock(&dev_priv->reservation_sem);
2789         return ret;
2790 }