author     Christoph Hellwig    2007-02-13 21:54:24 +0100
committer  Arnd Bergmann        2007-02-13 21:55:41 +0100
commit     26bec67386dbf6ef887254e815398842e182cdcd (patch)
tree       cde0851af46df1b376a7af47e7c59362506cecc5 /arch/powerpc/platforms/cell/spufs/sched.c
parent     [POWERPC] spufs: runqueue simplification (diff)
[POWERPC] spufs: optimize spu_run
There is no need to directly wake up contexts in spu_activate when called from spu_run, so add a flag to suppress this wakeup.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
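As an aside, here is a minimal sketch of how a caller on the spu_run path might use the new flag; the spu_run side is not part of the diff below, so the helper name is hypothetical and SPU_ACTIVATE_NOWAKE is assumed to be defined in a spufs header (see the declaration sketch after the diff).

    /* hypothetical caller: spu_run handles its own waiting elsewhere, so it
     * asks spu_activate() not to sleep in spu_prio_wait(); every other caller
     * passes 0 and keeps the old blocking behaviour */
    static int example_run_side_activate(struct spu_context *ctx)
    {
        return spu_activate(ctx, SPU_ACTIVATE_NOWAKE);
    }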
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  |  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 6f8e2257c5a6..07d0d095c62a 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -247,8 +247,8 @@ static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
-
 	if (!signal_pending(current)) {
 		mutex_unlock(&ctx->state_mutex);
 		schedule();
@@ -256,6 +256,7 @@ static void spu_prio_wait(struct spu_context *ctx)
 	}
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
+	clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }
 
 /**
@@ -275,7 +276,7 @@ static void spu_reschedule(struct spu *spu)
 	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
 		struct spu_context *ctx = spu_grab_context(best);
-		if (ctx)
+		if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
 			wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
@@ -315,7 +316,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
  * add the context to the runqueue so it gets woken up once an spu
  * is available.
  */
-int spu_activate(struct spu_context *ctx, u64 flags)
+int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
 
 	if (ctx->spu)
@@ -331,7 +332,8 @@ int spu_activate(struct spu_context *ctx, u64 flags)
 		}
 
 		spu_add_to_rq(ctx);
-		spu_prio_wait(ctx);
+		if (!(flags & SPU_ACTIVATE_NOWAKE))
+			spu_prio_wait(ctx);
 		spu_del_from_rq(ctx);
 	} while (!signal_pending(current));
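For reference, the hunks above depend on two definitions that live outside this file (most likely in spufs.h, which this diffstat does not cover); the sketch below shows roughly what they would look like, with the exact values being assumptions.

    /* bit in ctx->sched_flags, set by spu_prio_wait() around its sleep so
     * that spu_reschedule() only wakes contexts that are actually waiting
     * (assumed bit number) */
    enum {
        SPU_SCHED_WAKE = 0,
    };

    /* activation flag tested in spu_activate(); spu_run-style callers pass
     * it to skip the sleep in spu_prio_wait() (assumed value) */
    #define SPU_ACTIVATE_NOWAKE 1UL

    /* prototype matching the change from u64 to unsigned long above */
    int spu_activate(struct spu_context *ctx, unsigned long flags);

Taken together, spu_prio_wait() brackets its sleep with set_bit/clear_bit on SPU_SCHED_WAKE, and spu_reschedule() checks that bit before calling wake_up(), so a context placed on the runqueue without a sleeper behind it is never woken needlessly.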