/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.shared = false;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}
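
/*
 * Example (illustrative sketch only; not called from this file): the
 * typical reference pattern around the helpers above, assuming @res was
 * obtained from a lookup path that returned it with a reference held.
 *
 *	struct vmw_resource *extra = vmw_resource_reference(res);
 *
 *	... use the resource from another context ...
 *
 *	vmw_resource_unreference(&extra);  (the pointer is set to NULL)
 *	vmw_resource_unreference(&res);
 */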

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - make a resource visible to lookups
 *
 * @res:        Pointer to the newly created resource.
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
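
/*
 * Example (illustrative sketch): resolving a user-space handle to a
 * refcounted resource. The converter is supplied by the resource-specific
 * code; user_surface_converter, as used by vmw_user_lookup_handle() below,
 * is one such table.
 *
 *	struct vmw_resource *res = NULL;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret)
 *		return ret;
 *	... use res; a reference is held ...
 *	vmw_resource_unreference(&res);
 */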

/**
 * vmw_user_lookup_handle - helper that looks up either a surface or a
 * dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf must be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}
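
/*
 * Worked example (assuming 4 KiB pages, 8-byte pointers and an 8-byte
 * dma_addr_t): for a 64 KiB buffer, num_pages is 16 and page_array_size
 * is ttm_round_pot(16 * 8) = 128 bytes. With the vmw_dma_alloc_coherent
 * map mode, another ttm_round_pot(16 * 8) = 128 bytes is added for the
 * DMA address array, on top of the rounded struct and backend sizes.
 */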

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
                                            enum ttm_ref_type ref_type)
{
        struct vmw_user_dma_buffer *user_bo;

        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the refcounted TTM base object
 * should be assigned, or NULL if the caller doesn't need it.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf,
                          struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release,
                                    &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}
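
/*
 * Example (illustrative sketch): allocating a user buffer and publishing
 * its handle, mirroring what vmw_dmabuf_alloc_ioctl() and vmw_dumb_create()
 * below do.
 *
 *	struct vmw_dma_buffer *dma_buf;
 *	uint32_t handle;
 *	int ret;
 *
 *	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *				    &handle, &dma_buf, NULL);
 *	if (ret)
 *		return ret;
 *	... copy handle to user-space, then drop the local reference ...
 *	vmw_dmabuf_unreference(&dma_buf);
 */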

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->dma.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
                                                           nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);

        return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
                                             &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
                                       dma);
                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_dmabuf_unreference(&dma_buf);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
                                                      arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}
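
/*
 * Sketch of the matching user-space usage (assuming libdrm's
 * drmCommandWrite() and the definitions from the uapi header vmwgfx_drm.h;
 * error handling omitted):
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.handle = bo_handle,
 *		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	... CPU access to the mapped buffer ...
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */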

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf,
                                    NULL);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}
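
/*
 * Sketch of the matching user-space call (assuming libdrm's
 * drmCommandWriteRead() and the uapi definitions; error handling omitted).
 * The returned map_handle is the offset to pass to mmap() on the drm fd:
 *
 *	union drm_vmw_alloc_dmabuf_arg arg = { .req = { .size = size } };
 *	void *map;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg));
 *	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.rep.map_handle);
 */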

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out,
                           struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                pr_err("Invalid buffer object handle 0x%08lx\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                pr_err("Invalid buffer object handle 0x%08lx\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf,
                              uint32_t *handle)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf, NULL);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_dmabuf_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed while
 *                  interruptible.
 * @no_backup:      Whether to skip allocating a backup buffer.
 *
 * This function takes the resource off the LRU list and makes sure that
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes.\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}
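
/*
 * Example (illustrative sketch): the reserve/validate/unreserve cycle as
 * used by vmw_resource_pin() below. Note that the backup buffer, if any,
 * must be reserved before vmw_resource_validate() is called.
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	... reserve res->backup, then: ...
 *	ret = vmw_resource_validate(res);
 *	... unreserve the backup buffer ...
 *	vmw_resource_unreserve(res, false, NULL, 0);
 */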

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.shared = false;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.shared = false;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                dma_fence_put(&fence->base);
        } else
                reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dma_buf;

        if (mem == NULL)
                return;

        if (bo->destroy != vmw_dmabuf_bo_free &&
            bo->destroy != vmw_user_dmabuf_destroy)
                return;

        dma_buf = container_of(bo, struct vmw_dma_buffer, base);

        if (mem->mem_type != VMW_PL_MOB) {
                struct vmw_resource *res, *n;
                struct ttm_validate_buffer val_buf;

                val_buf.bo = bo;
                val_buf.shared = false;

                list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
                        if (unlikely(res->func->unbind == NULL))
                                continue;

                        (void) res->func->unbind(res, true, &val_buf);
                        res->backup_dirty = true;
                        res->res_dirty = false;
                        list_del_init(&res->mob_head);
                }

                (void) ttm_bo_wait(bo, false, false);
        }
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for "
                          "query MOB read back.\n");
                return -ENOMEM;
        }

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time the affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_fence_single_bo(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to wait interruptibly for the reservation
 * semaphore and the resource reservation.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_dma_buffer *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         interruptible, false);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}
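
/*
 * Example (illustrative sketch): pinning a resource around an operation
 * that needs a stable device id, then dropping the pin.
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... res cannot be evicted and res->id stays valid here ...
 *	vmw_resource_unpin(res);
 */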

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_dma_buffer *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}