author    Changbin Du    2017-01-05 09:49:03 +0100
committer Zhenyu Wang    2017-01-09 04:10:43 +0100
commit    440a9b9fae37dfd7e4c7d76db34fada57f9afd92 (patch)
tree      7679b1dd789545fe8ab5c33e68bc0014bc43c3fc /drivers/gpu/drm/i915/gvt/scheduler.c
parent    drm/i915/gvt: fix use after free for workload (diff)
drm/i915/gvt: dec vgpu->running_workload_num after the workload is really done
The vgpu->running_workload_num is used to determine whether a vgpu has
any workload running or not. So we should make sure the workload is
really done before we dec running_workload_num. Function
complete_current_workload is not the right place to do it, since this
function is still processing the workload. This patch moves the dec op
afterward.

v2: move dec op before wake_up(&scheduler->workload_complete_wq) (Min He)

Signed-off-by: Changbin Du <changbin.du@intel.com>
Reviewed-by: Min He <min.he@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
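Why the ordering matters: a waiter (e.g. a vgpu reset path) blocks until
running_workload_num reaches zero and then assumes nothing is still touching
the workload or vgpu state, so the counter may only hit zero after
workload->complete() has finished, and the wake-up must come after the
decrement or the waiter can miss the final wake. Below is a minimal,
self-contained userspace model of that handshake (not gvt code; the thread
names and the fake_workload type are invented for illustration, only the
counter and wait-queue names are borrowed from the patch):

/* Model of the ordering this patch enforces: complete, then decrement,
 * then wake. Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int running_workload_num = 1;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t workload_complete_wq = PTHREAD_COND_INITIALIZER;

struct fake_workload { int id; };

static void complete(struct fake_workload *w)
{
	/* Still dereferencing the workload here; a waiter that saw the
	 * counter hit zero too early could already be tearing down. */
	printf("finishing workload %d\n", w->id);
}

static void *scheduler_thread(void *arg)
{
	struct fake_workload *w = arg;

	complete(w);					/* 1. really finish   */
	atomic_fetch_sub(&running_workload_num, 1);	/* 2. then decrement  */

	pthread_mutex_lock(&lock);
	pthread_cond_signal(&workload_complete_wq);	/* 3. then wake       */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *reset_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (atomic_load(&running_workload_num))
		pthread_cond_wait(&workload_complete_wq, &lock);
	pthread_mutex_unlock(&lock);
	/* Safe: zero is only observable after complete() returned. */
	printf("vgpu idle, safe to reset\n");
	return NULL;
}

int main(void)
{
	struct fake_workload w = { .id = 1 };
	pthread_t s, r;

	pthread_create(&r, NULL, reset_thread, NULL);
	pthread_create(&s, NULL, scheduler_thread, &w);
	pthread_join(s, NULL);
	pthread_join(r, NULL);
	return 0;
}

Swapping steps 1 and 2 in scheduler_thread reintroduces the bug this patch
fixes: reset_thread could observe zero and proceed while complete() is still
using the workload.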
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c694dd039f3b..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload;
+	struct intel_vgpu *vgpu;
 	int event;
 
 	mutex_lock(&gvt->lock);
 
 	workload = scheduler->current_workload[ring_id];
+	vgpu = workload->vgpu;
 
-	if (!workload->status && !workload->vgpu->resetting) {
+	if (!workload->status && !vgpu->resetting) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 		for_each_set_bit(event, workload->pending_events,
 				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(workload->vgpu,
-					event);
+			intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	scheduler->current_workload[ring_id] = NULL;
 
-	atomic_dec(&workload->vgpu->running_workload_num);
-
 	list_del_init(&workload->list);
 	workload->complete(workload);
 
+	atomic_dec(&vgpu->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
 	mutex_unlock(&gvt->lock);
 }
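For context, the waiter on the other side of this handshake is the vgpu
idle-wait in the same file; a from-memory sketch (the exact body in
scheduler.c may differ) looks like:

/* Sketch of the consumer side, not part of this patch. A vgpu reset
 * path calls this before touching state a live workload might still
 * use; it only returns once no workload is in flight. */
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");
		/* Sees zero only after workload->complete() has run,
		 * thanks to the ordering this patch establishes. */
		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&vgpu->running_workload_num));
	}
}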