drm/amdgpu: fix user fence handling
author     Christian König <christian.koenig@amd.com>
           Mon, 14 Dec 2015 15:42:31 +0000 (16:42 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 16 Dec 2015 22:48:59 +0000 (17:48 -0500)
This fixes random corruption under memory pressure. We need to fence
the BO backing the user fence as well; otherwise it can be swapped out
and the GPU may write the fence value to an unrelated location.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
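
For context, the chunk that amdgpu_cs_user_fence_chunk() parses in the diff
below is supplied by userspace through the CS ioctl. The following is a
minimal sketch of that submission side, assuming the drm_amdgpu_cs /
drm_amdgpu_cs_chunk uapi layout of this kernel version; the helper name and
its parameters are illustrative, not libdrm API.

/*
 * Hypothetical userspace sketch (not part of this patch): build a CS
 * submission that carries an AMDGPU_CHUNK_ID_FENCE chunk.  The kernel
 * code in the diff below reads .handle and .offset from it.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* uapi header; include path may vary */

static int submit_with_user_fence(int drm_fd, uint32_t ctx_id,
				  uint32_t bo_list_handle,
				  uint32_t fence_bo_handle,
				  uint32_t fence_offset,
				  struct drm_amdgpu_cs_chunk *ib_chunk)
{
	struct drm_amdgpu_cs_chunk_fence fence_data = {
		.handle = fence_bo_handle,	/* GEM handle of the fence BO */
		.offset = fence_offset,		/* fence slot inside that BO */
	};
	struct drm_amdgpu_cs_chunk chunks[2];
	uint64_t chunk_ptrs[2];
	union drm_amdgpu_cs cs;

	chunks[0] = *ib_chunk;				/* the IB to run */
	chunks[1].chunk_id = AMDGPU_CHUNK_ID_FENCE;	/* the user fence */
	chunks[1].length_dw = sizeof(fence_data) / 4;
	chunks[1].chunk_data = (uintptr_t)&fence_data;

	/* The ioctl takes an array of user pointers, one per chunk. */
	chunk_ptrs[0] = (uintptr_t)&chunks[0];
	chunk_ptrs[1] = (uintptr_t)&chunks[1];

	memset(&cs, 0, sizeof(cs));
	cs.in.ctx_id = ctx_id;
	cs.in.bo_list_handle = bo_list_handle;
	cs.in.num_chunks = 2;
	cs.in.chunks = (uintptr_t)chunk_ptrs;

	return ioctl(drm_fd, DRM_IOCTL_AMDGPU_CS, &cs);
}
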
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5a5f04d0902d662548ebae9f82525e1363e8457c..048cfe073dae6a1e89112b219c6a130b3d513be9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser {
        struct ww_acquire_ctx   ticket;
 
        /* user fence */
-       struct amdgpu_user_fence uf;
+       struct amdgpu_user_fence        uf;
+       struct amdgpu_bo_list_entry     uf_entry;
 };
 
 struct amdgpu_job {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4f352ec9dec4e2fb7c3bb5bd062e674291fc8d5a..25a3e2485cc2e9572943638d1975f678fbc26e38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
        return 0;
 }
 
+static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+                                     struct drm_amdgpu_cs_chunk_fence *fence_data)
+{
+       struct drm_gem_object *gobj;
+       uint32_t handle;
+
+       handle = fence_data->handle;
+       gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
+                                    fence_data->handle);
+       if (gobj == NULL)
+               return -EINVAL;
+
+       p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+       p->uf.offset = fence_data->offset;
+
+       if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+               drm_gem_object_unreference_unlocked(gobj);
+               return -EINVAL;
+       }
+
+       p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
+       p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+       p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+       p->uf_entry.priority = 0;
+       p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+       p->uf_entry.tv.shared = true;
+
+       drm_gem_object_unreference_unlocked(gobj);
+       return 0;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
        union drm_amdgpu_cs *cs = data;
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
                case AMDGPU_CHUNK_ID_FENCE:
                        size = sizeof(struct drm_amdgpu_cs_chunk_fence);
-                       if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
-                               uint32_t handle;
-                               struct drm_gem_object *gobj;
-                               struct drm_amdgpu_cs_chunk_fence *fence_data;
-
-                               fence_data = (void *)p->chunks[i].kdata;
-                               handle = fence_data->handle;
-                               gobj = drm_gem_object_lookup(p->adev->ddev,
-                                                            p->filp, handle);
-                               if (gobj == NULL) {
-                                       ret = -EINVAL;
-                                       goto free_partial_kdata;
-                               }
-
-                               p->uf.bo = gem_to_amdgpu_bo(gobj);
-                               amdgpu_bo_ref(p->uf.bo);
-                               drm_gem_object_unreference_unlocked(gobj);
-                               p->uf.offset = fence_data->offset;
-                       } else {
+                       if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }
+
+                       ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+                       if (ret)
+                               goto free_partial_kdata;
+
                        break;
 
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
        p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
                                      &p->validated);
 
+       if (p->uf.bo)
+               list_add(&p->uf_entry.tv.head, &p->validated);
+
        if (need_mmap_lock)
                down_read(&current->mm->mmap_sem);
 
@@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
                for (i = 0; i < parser->num_ibs; i++)
                        amdgpu_ib_free(parser->adev, &parser->ibs[i]);
        kfree(parser->ibs);
-       if (parser->uf.bo)
-               amdgpu_bo_unref(&parser->uf.bo);
+       amdgpu_bo_unref(&parser->uf.bo);
+       amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
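
What the diff relies on, but does not show, is that everything on
p->validated is later reserved and fenced as one group, so TTM cannot evict
or swap out the user fence BO until the submission's fence signals. Below is
a rough sketch of that reserve/fence pattern using the TTM execbuf helpers
of this kernel generation; the function name and the elided validation step
are illustrative, not code from this commit.

/*
 * Illustrative sketch only (not from this patch): why membership of the
 * "validated" list matters.  The ttm_eu_* calls follow the TTM API of
 * this kernel; error handling is reduced to the minimum.
 */
#include <linux/fence.h>
#include <drm/ttm/ttm_execbuf_util.h>

static int reserve_and_fence(struct ww_acquire_ctx *ticket,
			     struct list_head *validated,
			     struct fence *job_fence)
{
	int r;

	/* Lock every BO on the list under one ww_mutex ticket; from here
	 * on TTM cannot move them behind our back. */
	r = ttm_eu_reserve_buffers(ticket, validated, true, NULL);
	if (r)
		return r;

	/* ... validate placements and push the submission to the ring ... */

	/* Attach the job's fence to every BO's reservation object and
	 * unlock; eviction/swap-out now has to wait for that fence, so
	 * the GPU's user fence write cannot land in reclaimed memory. */
	ttm_eu_fence_buffer_objects(ticket, validated, job_fence);
	return 0;
}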