git.hungrycats.org Git - linux/commitdiff
drm/amd: fix scheduler fence teardown order v2
author: Christian König <christian.koenig@amd.com>
Fri, 28 Oct 2016 15:04:07 +0000 (17:04 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 18 Nov 2016 09:51:49 +0000 (10:51 +0100)
commit c24784f01549ecdf23fc00d0588423bcf8956714 upstream.

Some fences might be alive even after we have stopped the scheduler leading
to warnings about leaked objects from the SLUB allocator.

Fix this by allocating/freeing the SLUB allocator from the module
init/fini functions just like we do it for hw fences.

v2: make variable static, add link to bug

Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=97500
Reported-by: Grazvydas Ignotas <notasas@gmail.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/amd/scheduler/sched_fence.c

index 9aa533cf4ad10f79535172eaa5edafb097da0154..414a1600da54b5a108638d86246429d5796562db 100644 (file)
@@ -605,6 +605,7 @@ static int __init amdgpu_init(void)
 {
        amdgpu_sync_init();
        amdgpu_fence_slab_init();
+       amd_sched_fence_slab_init();
        if (vgacon_text_force()) {
                DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
                return -EINVAL;
@@ -624,6 +625,7 @@ static void __exit amdgpu_exit(void)
        drm_pci_exit(driver, pdriver);
        amdgpu_unregister_atpx_handler();
        amdgpu_sync_fini();
+       amd_sched_fence_slab_fini();
        amdgpu_fence_slab_fini();
 }
 
index 910b8d5b21c56595a9cb858c6564c711134c2645..ffe1f85ce30019dc75b7aff2550ad60c6e70a5f1 100644 (file)
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
 
-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
-
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
-       if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
-               sched_fence_slab = kmem_cache_create(
-                       "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
-                       SLAB_HWCACHE_ALIGN, NULL);
-               if (!sched_fence_slab)
-                       return -ENOMEM;
-       }
 
        /* Each scheduler will run on a seperate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,7 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
        if (sched->thread)
                kthread_stop(sched->thread);
-       rcu_barrier();
-       if (atomic_dec_and_test(&sched_fence_slab_ref))
-               kmem_cache_destroy(sched_fence_slab);
 }
index 7cbbbfb502ef1342caa2a5b36aa970368fed5055..51068e6c3d9af4746e40ebca3f81384e95d5c16c 100644 (file)
@@ -30,9 +30,6 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
 struct amd_sched_fence *amd_sched_fence_create(
        struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
index 6b63beaf75746848720f98d4eb2b5329c299267b..93ad2e1f8f5702177045b866072a8b24903b1cdc 100644 (file)
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+       sched_fence_slab = kmem_cache_create(
+               "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+               SLAB_HWCACHE_ALIGN, NULL);
+       if (!sched_fence_slab)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
+       rcu_barrier();
+       kmem_cache_destroy(sched_fence_slab);
+}
+
 struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
                                               void *owner)
 {