author	Tejun Heo	2013-03-14 03:47:39 +0100
committer	Tejun Heo	2013-03-14 03:47:39 +0100
commit	ebf44d16ec4619c8a8daeacd987dd86d420ea2c3 (patch)
tree	501bb8fbeda2ff67d71af733c3c56150053a4351	/kernel/workqueue.c
parent	workqueue: rename worker_pool->assoc_mutex to ->manager_mutex (diff)
workqueue: factor out initial worker creation into create_and_start_worker()
get_unbound_pool(), workqueue_cpu_up_callback() and init_workqueues() have similar code pieces to create and start the initial worker. Factor those out into create_and_start_worker().

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	47
1 file changed, 23 insertions(+), 24 deletions(-)
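
For quick reference, this is the shape of the consolidation: the lock/start/unlock sequence moves into one helper, and each caller becomes a single error-checked call. The snippet below is condensed from the hunks that follow, not a standalone build unit; -ENOMEM (when create_worker() fails) is the helper's only failure mode.

	static int create_and_start_worker(struct worker_pool *pool)
	{
		struct worker *worker;

		worker = create_worker(pool);
		if (worker) {
			/* start_worker() requires pool->lock */
			spin_lock_irq(&pool->lock);
			start_worker(worker);
			spin_unlock_irq(&pool->lock);
		}

		return worker ? 0 : -ENOMEM;
	}

	/* e.g. the converted call site in get_unbound_pool() */
	if (create_and_start_worker(pool) < 0)
		goto fail;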
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bc25bdfb4b42..cac710646cbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1793,6 +1793,26 @@ static void start_worker(struct worker *worker)
}
/**
+ * create_and_start_worker - create and start a worker for a pool
+ * @pool: the target pool
+ *
+ * Create and start a new worker for @pool.
+ */
+static int create_and_start_worker(struct worker_pool *pool)
+{
+ struct worker *worker;
+
+ worker = create_worker(pool);
+ if (worker) {
+ spin_lock_irq(&pool->lock);
+ start_worker(worker);
+ spin_unlock_irq(&pool->lock);
+ }
+
+ return worker ? 0 : -ENOMEM;
+}
+
+/**
* destroy_worker - destroy a workqueue worker
* @worker: worker to be destroyed
*
@@ -3542,7 +3562,6 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
static DEFINE_MUTEX(create_mutex);
u32 hash = wqattrs_hash(attrs);
struct worker_pool *pool;
- struct worker *worker;
mutex_lock(&create_mutex);
@@ -3568,14 +3587,9 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
goto fail;
/* create and start the initial worker */
- worker = create_worker(pool);
- if (!worker)
+ if (create_and_start_worker(pool) < 0)
goto fail;
- spin_lock_irq(&pool->lock);
- start_worker(worker);
- spin_unlock_irq(&pool->lock);
-
/* install */
spin_lock_irq(&workqueue_lock);
hash_add(unbound_pool_hash, &pool->hash_node, hash);
@@ -4148,18 +4162,10 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
for_each_cpu_worker_pool(pool, cpu) {
- struct worker *worker;
-
if (pool->nr_workers)
continue;
-
- worker = create_worker(pool);
- if (!worker)
+ if (create_and_start_worker(pool) < 0)
return NOTIFY_BAD;
-
- spin_lock_irq(&pool->lock);
- start_worker(worker);
- spin_unlock_irq(&pool->lock);
}
break;
@@ -4409,15 +4415,8 @@ static int __init init_workqueues(void)
struct worker_pool *pool;
for_each_cpu_worker_pool(pool, cpu) {
- struct worker *worker;
-
pool->flags &= ~POOL_DISASSOCIATED;
-
- worker = create_worker(pool);
- BUG_ON(!worker);
- spin_lock_irq(&pool->lock);
- start_worker(worker);
- spin_unlock_irq(&pool->lock);
+ BUG_ON(create_and_start_worker(pool) < 0);
}
}