From a18b83b7ef3c98cd8b4bb885e4a649a8f30fb7b0 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 23 Mar 2009 10:02:53 +0530 Subject: cpuacct: make cpuacct hierarchy walk in cpuacct_charge() safe when rcupreempt is used -v2 Impact: fix cgroups race under rcu-preempt cpuacct_charge() obtains the task's ca and does a hierarchy walk upwards. This can race with the task's movement between cgroups. This race can cause an access to a freed ca pointer in cpuacct_charge() or an access to an invalid cgroups pointer of the task. This will not happen with rcu or tree rcu as cpuacct_charge() is called with preemption disabled. However, if rcupreempt is used, the race is seen. Thanks to Li Zefan for explaining this. Fix this race by explicitly protecting ca and the hierarchy walk with rcu_read_lock(). Changes for v2: - Update patch description (as per Li Zefan's review comments). - Remove comments in cpuacct_charge() which explained why rcu_read_lock() was needed (as per Peter Zijlstra's review comments). Signed-off-by: Bharata B Rao Cc: Dhaval Giani Cc: Li Zefan Cc: Paul Menage Cc: KAMEZAWA Hiroyuki Acked-by: Peter Zijlstra Acked-by: Balbir Singh Tested-by: Balbir Singh Signed-off-by: Ingo Molnar --- kernel/sched.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 186c6fd08acf..cc397aae5eae 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -9654,12 +9654,17 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) return; cpu = task_cpu(tsk); + + rcu_read_lock(); + ca = task_ca(tsk); for (; ca; ca = ca->parent) { u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); *cpuusage += cputime; } + + rcu_read_unlock(); } struct cgroup_subsys cpuacct_subsys = { -- cgit v1.2.3-55-g7522 From 13b8bd0a5713bdf05659019badd7c0407984ece1 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 25 Mar 2009 15:01:22 +1030 Subject: sched_rt: don't allocate cpumask in fastpath Impact: cleanup As pointed out by Steven Rostedt: since the arg in question is unused, we simply change cpupri_find() to accept NULL. Reported-by: Steven Rostedt Signed-off-by: Rusty Russell LKML-Reference: <200903251501.22664.rusty@rustcorp.com.au> Signed-off-by: Ingo Molnar --- kernel/sched_cpupri.c | 5 +++-- kernel/sched_rt.c | 15 ++++----------- 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 1e00bfacf9b8..cdd3c89574cd 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -55,7 +55,7 @@ static int convert_prio(int prio) * cpupri_find - find the best (lowest-pri) CPU in the system * @cp: The cpupri context * @p: The task - * @lowest_mask: A mask to fill in with selected CPUs + * @lowest_mask: A mask to fill in with selected CPUs (or NULL) * * Note: This function returns the recommended CPUs as calculated during the * current invokation. 
By the time the call returns, the CPUs may have in @@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; - cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); + if (lowest_mask) + cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); return 1; } diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index bac1061cea2f..fbec5a58ff10 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -805,20 +805,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync) static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) { - cpumask_var_t mask; - if (rq->curr->rt.nr_cpus_allowed == 1) return; - if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) - return; - if (p->rt.nr_cpus_allowed != 1 - && cpupri_find(&rq->rd->cpupri, p, mask)) - goto free; + && cpupri_find(&rq->rd->cpupri, p, NULL)) + return; - if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask)) - goto free; + if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) + return; /* * There appears to be other cpus that can accept @@ -827,8 +822,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) */ requeue_task_rt(rq, p, 1); resched_task(rq->curr); -free: - free_cpumask_var(mask); } #endif /* CONFIG_SMP */ -- cgit v1.2.3-55-g7522 From c5f8d99585d7b5b7e857fabf8aefd0174903a98c Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Tue, 31 Mar 2009 16:56:03 +0900 Subject: posixtimers, sched: Fix posix clock monotonicity Impact: Regression fix (against clock_gettime() going backwards) This patch re-introduces a couple of functions, task_sched_runtime and thread_group_sched_runtime, which were removed at the time of 2.6.28-rc1. These functions protect the sampling of the thread/process clock with the rq lock. The rq lock is required so that rq->clock is not updated during the sampling; otherwise clock_gettime() may return ((accounted runtime before update) + (delta after update)), which is less than what it should be. v2 -> v3: - Rename static helper function __task_delta_exec() to do_task_delta_exec() since -tip tree already has a __task_delta_exec() of a different version. v1 -> v2: - Revise function comments and patch description. - Add a note about the accuracy of the thread group's runtime. 
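In short, the sampling pattern the patch introduces looks like this (a sketch assembled from the kernel/sched.c hunks below; holding the rq lock across both reads is what keeps the returned clock monotonic):

unsigned long long task_sched_runtime(struct task_struct *p)
{
        unsigned long flags;
        struct rq *rq;
        u64 ns;

        rq = task_rq_lock(p, &flags);   /* rq->clock cannot be updated under us */
        /* accounted runtime plus the not-yet-accounted delta, both taken
         * against the same rq->clock, so a later sample can never be smaller */
        ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
        task_rq_unlock(rq, &flags);

        return ns;
}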
Signed-off-by: Hidetoshi Seto Acked-by: Peter Zijlstra Cc: stable@kernel.org [2.6.28.x][2.6.29.x] LKML-Reference: <49D1CC93.4080401@jp.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 7 ++--- kernel/sched.c | 65 +++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 61 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index fa07da94d7be..4318c3085788 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, cpu->cpu = virt_ticks(p); break; case CPUCLOCK_SCHED: - cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p); + cpu->sched = task_sched_runtime(p); break; } return 0; @@ -240,18 +240,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock, { struct task_cputime cputime; - thread_group_cputime(p, &cputime); switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: + thread_group_cputime(p, &cputime); cpu->cpu = cputime_add(cputime.utime, cputime.stime); break; case CPUCLOCK_VIRT: + thread_group_cputime(p, &cputime); cpu->cpu = cputime.utime; break; case CPUCLOCK_SCHED: - cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p); + cpu->sched = thread_group_sched_runtime(p); break; } return 0; diff --git a/kernel/sched.c b/kernel/sched.c index cc397aae5eae..c8d7f17bd036 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4139,9 +4139,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); /* - * Return any ns on the sched_clock that have not yet been banked in + * Return any ns on the sched_clock that have not yet been accounted in * @p in case that task is currently running. + * + * Called with task_rq_lock() held on @rq. */ +static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) +{ + u64 ns = 0; + + if (task_current(rq, p)) { + update_rq_clock(rq); + ns = rq->clock - p->se.exec_start; + if ((s64)ns < 0) + ns = 0; + } + + return ns; +} + unsigned long long task_delta_exec(struct task_struct *p) { unsigned long flags; @@ -4149,16 +4165,49 @@ unsigned long long task_delta_exec(struct task_struct *p) u64 ns = 0; rq = task_rq_lock(p, &flags); + ns = do_task_delta_exec(p, rq); + task_rq_unlock(rq, &flags); - if (task_current(rq, p)) { - u64 delta_exec; + return ns; +} - update_rq_clock(rq); - delta_exec = rq->clock - p->se.exec_start; - if ((s64)delta_exec > 0) - ns = delta_exec; - } +/* + * Return accounted runtime for the task. + * In case the task is currently running, return the runtime plus current's + * pending runtime that have not been accounted yet. + */ +unsigned long long task_sched_runtime(struct task_struct *p) +{ + unsigned long flags; + struct rq *rq; + u64 ns = 0; + + rq = task_rq_lock(p, &flags); + ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); + task_rq_unlock(rq, &flags); + + return ns; +} +/* + * Return sum_exec_runtime for the thread group. + * In case the task is currently running, return the sum plus current's + * pending runtime that have not been accounted yet. + * + * Note that the thread group might have other running tasks as well, + * so the return value not includes other pending runtime that other + * running tasks might have. 
+ */ +unsigned long long thread_group_sched_runtime(struct task_struct *p) +{ + struct task_cputime totals; + unsigned long flags; + struct rq *rq; + u64 ns; + + rq = task_rq_lock(p, &flags); + thread_group_cputime(p, &totals); + ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); task_rq_unlock(rq, &flags); return ns; -- cgit v1.2.3-55-g7522 From ef12fefabf94b6a902ad3abd3eb124b00560c445 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Tue, 31 Mar 2009 10:02:22 +0530 Subject: cpuacct: add per-cgroup utime/stime statistics Add per-cgroup cpuacct controller statistics like the system and user time consumed by the group of tasks. Changelog: v7 - Changed the name of the statistic from utime to user and from stime to system so that in the future we could easily add other statistics like irq, softirq, steal times etc. v6 - Fixed a bug in the error path of cpuacct_create() (pointed out by Li Zefan). v5 - In cpuacct_stats_show(), use cputime64_to_clock_t() since we are operating on a 64bit variable here. v4 - Remove comments in cpuacct_update_stats() which explained why rcu_read_lock() was needed (as per Peter Zijlstra's review comments). - Don't say that percpu_counter_read() is broken in Documentation/cpuacct.txt as per KAMEZAWA Hiroyuki's review comments. v3 - Fix a small race in the cpuacct hierarchy walk. v2 - stime and utime now exported in clock_t units instead of msecs. - Addressed the code review comments from Balbir and Li Zefan. - Moved to -tip tree. v1 - Moved the stime/utime accounting to cpuacct controller. Earlier versions - http://lkml.org/lkml/2009/2/25/129 Signed-off-by: Bharata B Rao Signed-off-by: Balaji Rao Cc: Dhaval Giani Cc: Paul Menage Cc: Andrew Morton Cc: KAMEZAWA Hiroyuki Reviewed-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Balbir Singh Tested-by: Balbir Singh LKML-Reference: <20090331043222.GA4093@in.ibm.com> Signed-off-by: Ingo Molnar --- Documentation/cgroups/cpuacct.txt | 18 ++++++++ kernel/sched.c | 87 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 99 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt index bb775fbe43d7..8b930946c52a 100644 --- a/Documentation/cgroups/cpuacct.txt +++ b/Documentation/cgroups/cpuacct.txt @@ -30,3 +30,21 @@ The above steps create a new group g1 and move the current shell process (bash) into it. CPU time consumed by this bash and its children can be obtained from g1/cpuacct.usage and the same is accumulated in /cgroups/cpuacct.usage also. + +cpuacct.stat file lists a few statistics which further divide the +CPU time obtained by the cgroup into user and system times. Currently +the following statistics are supported: + +user: Time spent by tasks of the cgroup in user mode. +system: Time spent by tasks of the cgroup in kernel mode. + +user and system are in USER_HZ unit. + +cpuacct controller uses percpu_counter interface to collect user and +system times. This has two side effects: + +- It is theoretically possible to see wrong values for user and system times. + This is because percpu_counter_read() on 32bit systems isn't safe + against concurrent writes. +- It is possible to see slightly outdated values for user and system times + due to the batch processing nature of percpu_counter. 
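As a rough illustration of that trade-off (a sketch, not part of the patch; read_stat_fast() and read_stat_precise() are hypothetical helpers), percpu_counter offers both a cheap read and an exact sum:

/* percpu_counter folds per-CPU deltas into fbc->count only in batches,
 * which is why cpuacct.stat can lag slightly behind reality. */
static s64 read_stat_fast(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);  /* lockless; may miss unfolded deltas */
}

static s64 read_stat_precise(struct percpu_counter *fbc)
{
        return percpu_counter_sum(fbc);   /* takes fbc->lock and sums every CPU */
}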
diff --git a/kernel/sched.c b/kernel/sched.c index c8d7f17bd036..8d1bdbe8aafc 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1393,10 +1393,22 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, struct rq_iterator *iterator); #endif +/* Time spent by the tasks of the cpu accounting group executing in ... */ +enum cpuacct_stat_index { + CPUACCT_STAT_USER, /* ... user mode */ + CPUACCT_STAT_SYSTEM, /* ... kernel mode */ + + CPUACCT_STAT_NSTATS, +}; + #ifdef CONFIG_CGROUP_CPUACCT static void cpuacct_charge(struct task_struct *tsk, u64 cputime); +static void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val); #else static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} +static inline void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val) {} #endif static inline void inc_cpu_load(struct rq *rq, unsigned long load) @@ -4236,6 +4248,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime, cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); + + cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); /* Account for user time used */ acct_update_integrals(p); } @@ -4297,6 +4311,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset, else cpustat->system = cputime64_add(cpustat->system, tmp); + cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); + /* Account for system time used */ acct_update_integrals(p); } @@ -9539,6 +9555,7 @@ struct cpuacct { struct cgroup_subsys_state css; /* cpuusage holds pointer to a u64-type object on every cpu */ u64 *cpuusage; + struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; struct cpuacct *parent; }; @@ -9563,20 +9580,32 @@ static struct cgroup_subsys_state *cpuacct_create( struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + int i; if (!ca) - return ERR_PTR(-ENOMEM); + goto out; ca->cpuusage = alloc_percpu(u64); - if (!ca->cpuusage) { - kfree(ca); - return ERR_PTR(-ENOMEM); - } + if (!ca->cpuusage) + goto out_free_ca; + + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) + if (percpu_counter_init(&ca->cpustat[i], 0)) + goto out_free_counters; if (cgrp->parent) ca->parent = cgroup_ca(cgrp->parent); return &ca->css; + +out_free_counters: + while (--i >= 0) + percpu_counter_destroy(&ca->cpustat[i]); + free_percpu(ca->cpuusage); +out_free_ca: + kfree(ca); +out: + return ERR_PTR(-ENOMEM); } /* destroy an existing cpu accounting group */ @@ -9584,7 +9613,10 @@ static void cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = cgroup_ca(cgrp); + int i; + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) + percpu_counter_destroy(&ca->cpustat[i]); free_percpu(ca->cpuusage); kfree(ca); } @@ -9671,6 +9703,25 @@ static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, return 0; } +static const char *cpuacct_stat_desc[] = { + [CPUACCT_STAT_USER] = "user", + [CPUACCT_STAT_SYSTEM] = "system", +}; + +static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct cpuacct *ca = cgroup_ca(cgrp); + int i; + + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) { + s64 val = percpu_counter_read(&ca->cpustat[i]); + val = cputime64_to_clock_t(val); + cb->fill(cb, cpuacct_stat_desc[i], val); + } + return 0; +} + static struct cftype files[] = { { .name = "usage", @@ -9681,7 +9732,10 @@ static struct cftype files[] = { .name = "usage_percpu", 
.read_seq_string = cpuacct_percpu_seq_read, }, - + { + .name = "stat", + .read_map = cpuacct_stats_show, + }, }; static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) @@ -9716,6 +9770,27 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) rcu_read_unlock(); } +/* + * Charge the system/user time to the task's accounting group. + */ +static void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val) +{ + struct cpuacct *ca; + + if (unlikely(!cpuacct_subsys.active)) + return; + + rcu_read_lock(); + ca = task_ca(tsk); + + do { + percpu_counter_add(&ca->cpustat[idx], val); + ca = ca->parent; + } while (ca); + rcu_read_unlock(); +} + struct cgroup_subsys cpuacct_subsys = { .name = "cpuacct", .create = cpuacct_create, -- cgit v1.2.3-55-g7522 From 46e0bb9c12f4bab539736f1714cbf16600f681ec Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Mon, 30 Mar 2009 10:25:20 +0530 Subject: sched: Print sched_group::__cpu_power in sched_domain_debug Impact: extend debug info /proc/sched_debug If the user changes the value of the sched_mc/smt_power_savings sysfs tunable, it'll trigger a rebuilding of the whole sched_domain tree, with the SD_POWERSAVINGS_BALANCE flag set at certain levels. As a result, there would be a change in the __cpu_power of sched_groups in the sched_domain hierarchy. Print the __cpu_power values for each sched_group in sched_domain_debug to help verify this change and correlate it with the change in the load-balancing behavior. Signed-off-by: Gautham R Shenoy Cc: Peter Zijlstra LKML-Reference: <20090330045520.2869.24777.stgit@sofia.in.ibm.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 8d1bdbe8aafc..6234d10c6a79 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6963,7 +6963,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpumask_or(groupmask, groupmask, sched_group_cpus(group)); cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); - printk(KERN_CONT " %s", str); + printk(KERN_CONT " %s (__cpu_power = %d)", str, + group->__cpu_power); group = group->next; } while (group != sd->groups); -- cgit v1.2.3-55-g7522 From 633fe795b80693a8198e7d82f66538a72d2bbba2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 1 Apr 2009 17:47:23 -0700 Subject: timers: add missing kernel-doc Add missing kernel-doc parameter notation and change function name to its new name: Warning(kernel/timer.c:543): No description found for parameter 'name' Warning(kernel/timer.c:543): No description found for parameter 'key' Signed-off-by: Randy Dunlap Cc: akpm Cc: Johannes Berg LKML-Reference: <20090401174723.f0bea0eb.randy.dunlap@oracle.com> Signed-off-by: Ingo Molnar --- kernel/timer.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 9b77fc9a9ac8..3af9a0bc4292 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -524,10 +524,13 @@ static void __init_timer(struct timer_list *timer) } /** - * init_timer - initialize a timer. + * init_timer_key - initialize a timer * @timer: the timer to be initialized + * @name: name of the timer + * @key: lockdep class key of the fake lock used for tracking timer + * sync lock dependencies * - * init_timer() must be done to a timer prior calling *any* of the + * init_timer_key() must be done to a timer prior calling *any* of the * other timer functions. 
*/ void init_timer(struct timer_list *timer) -- cgit v1.2.3-55-g7522 From cd84a42f315e50edd454c27a3da3951ccd3d735a Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Thu, 2 Apr 2009 14:19:38 -0700 Subject: futex: comment requeue key reference semantics We've tripped over the futex_requeue drop_count referring to key2 instead of key1. The code is actually correct, but is non-intuitive. This patch adds an explicit comment explaining the requeue. Signed-off-by: Darren Hart Cc: Peter Zijlstra Cc: Nick Piggin Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- kernel/futex.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 6b50a024bca2..eef8cd26b5e5 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -883,7 +883,12 @@ retry_private: out_unlock: double_unlock_hb(hb1, hb2); - /* drop_futex_key_refs() must be called outside the spinlocks. */ + /* + * drop_futex_key_refs() must be called outside the spinlocks. During + * the requeue we moved futex_q's from the hash bucket at key1 to the + * one at key2 and updated their key pointer. We no longer need to + * hold the references to key1. + */ while (--drop_count >= 0) drop_futex_key_refs(&key1); -- cgit v1.2.3-55-g7522 From 9756b15e1b58453a6fd54b85c1ad8515209e10bb Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 30 Mar 2009 20:37:20 -0700 Subject: irq: fix cpumask memory leak on offstack cpumask kernels Need to free the old cpumask for affinity and pending_mask. Signed-off-by: Yinghai Lu Acked-by: Rusty Russell LKML-Reference: <49D18FF0.50707@kernel.org> Signed-off-by: Ingo Molnar --- include/linux/irq.h | 14 ++++++++++++++ kernel/irq/numa_migrate.c | 1 + 2 files changed, 15 insertions(+) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 974890b3c52f..99d147efe399 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -482,6 +482,16 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, #endif } +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ + free_cpumask_var(old_desc->affinity); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + free_cpumask_var(old_desc->pending_mask); +#endif +} + #else /* !CONFIG_SMP */ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, @@ -495,6 +505,10 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, { } +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +} #endif /* CONFIG_SMP */ #endif /* _LINUX_IRQ_H */ diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 243d6121e50e..44bbdcbaf8d2 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -54,6 +54,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) { free_kstat_irqs(old_desc, desc); + free_desc_masks(old_desc, desc); arch_free_chip_data(old_desc, desc); } -- cgit v1.2.3-55-g7522 From 6279a751fe096a21dc7704e918d570d3ff06e769 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 27 Mar 2009 01:06:07 +0100 Subject: posix-timers: fix RLIMIT_CPU && fork() See http://bugzilla.kernel.org/show_bug.cgi?id=12911 copy_signal() copies signal->rlim, but RLIMIT_CPU is "lost", because posix_cpu_timers_init_group() sets cputime_expires.prof_exp = 0 and thus fastpath_timer_check() returns false unless we have other expired cpu timers. 
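A minimal userspace reproducer for this, assuming the behaviour described above (hypothetical test program, not part of the patch): the child inherits the RLIMIT_CPU value but, before this fix, no armed CPU timer, so it can spin past the limit without being killed.

#include <sys/resource.h>
#include <unistd.h>
#include <assert.h>

int main(void)
{
        struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };

        assert(!setrlimit(RLIMIT_CPU, &rl));
        if (fork() == 0)
                for (;;)        /* child: should be killed after ~1s of CPU time */
                        ;
        return 0;
}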
Change copy_signal() to set cputime_expires.prof_exp if we have RLIMIT_CPU. Also, set cputimer.running = 1 in that case. This is not strictly necessary, but imho makes sense. Reported-by: Peter Lojkin Signed-off-by: Oleg Nesterov Acked-by: Peter Zijlstra Cc: Peter Lojkin Cc: Roland McGrath Cc: stable@kernel.org LKML-Reference: <20090327000607.GA10104@redhat.com> Signed-off-by: Ingo Molnar --- kernel/fork.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 4854c2c4a82e..9b51a1b190d4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -808,6 +808,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) sig->cputime_expires.virt_exp = cputime_zero; sig->cputime_expires.sched_exp = 0; + if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { + sig->cputime_expires.prof_exp = + secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); + sig->cputimer.running = 1; + } + /* The timer lists. */ INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); @@ -823,11 +829,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) atomic_inc(&current->signal->live); return 0; } - sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); - - if (sig) - posix_cpu_timers_init_group(sig); + sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; if (!sig) return -ENOMEM; @@ -865,6 +868,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); + posix_cpu_timers_init_group(sig); + acct_init_pacct(&sig->pacct); tty_audit_fork(sig); -- cgit v1.2.3-55-g7522 From 8f2e586567b1bad72dac7c3810fe9a2ef7117506 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 27 Mar 2009 01:06:10 +0100 Subject: posix-timers: fix RLIMIT_CPU && setitimer(CPUCLOCK_PROF) update_rlimit_cpu() tries to optimize out set_process_cpu_timer() in the case when we already have a CPUCLOCK_PROF timer which should expire first. But it uses cputime_lt() instead of cputime_gt(). Test case: int main(void) { struct itimerval it = { .it_value = { .tv_sec = 1000 }, }; assert(!setitimer(ITIMER_PROF, &it, NULL)); struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1, }; assert(!setrlimit(RLIMIT_CPU, &rl)); for (;;) ; return 0; } Without this patch, the task is not killed as RLIMIT_CPU demands. 
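Plugging in the numbers from the test case: it_prof_expires corresponds to 1000s and the new RLIMIT_CPU cputime to 1s, so the old cputime_lt(1000, 1) check is false and the one-second limit is never armed. A commented sketch of the corrected condition (matching the one-liner in the diff below):

/* Arm the new RLIMIT_CPU expiry unless an existing ITIMER_PROF timer
 * already fires earlier than it. */
if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
    cputime_gt(current->signal->it_prof_expires, cputime)) {
        /* no prof timer, or it expires later than the new limit */
        set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
}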
Signed-off-by: Oleg Nesterov Acked-by: Peter Zijlstra Cc: Peter Lojkin Cc: Roland McGrath Cc: stable@kernel.org LKML-Reference: <20090327000610.GA10108@redhat.com> Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 8e5d9a68b022..bb53185d8c78 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new) cputime = secs_to_cputime(rlim_new); if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || - cputime_lt(current->signal->it_prof_expires, cputime)) { + cputime_gt(current->signal->it_prof_expires, cputime)) { spin_lock_irq(&current->sighand->siglock); set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(&current->sighand->siglock); -- cgit v1.2.3-55-g7522 From 3a709703538c471530405556dda136fd0d82b0dc Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 7 Apr 2009 23:21:06 -0700 Subject: ptrace: some checkpatch fixes This fixes all the checkpatch --file complaints about kernel/ptrace.c and also removes an unused #include. I've verified that there are no changes to the compiled code on x86_64. Signed-off-by: Roland McGrath [ Removed the parts that just split a line - Linus ] Signed-off-by: Linus Torvalds --- kernel/ptrace.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index aaad0ec34194..64191fa09b7e 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -21,9 +21,7 @@ #include #include #include - -#include -#include +#include /* @@ -48,7 +46,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) list_add(&child->ptrace_entry, &new_parent->ptraced); child->parent = new_parent; } - + /* * Turn a tracing stop into a normal stop now, since with no tracer there * would be no way to wake it up with SIGCONT or SIGKILL. If there was a @@ -173,7 +171,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) task_lock(task); err = __ptrace_may_access(task, mode); task_unlock(task); - return (!err ? true : false); + return !err; } int ptrace_attach(struct task_struct *task) @@ -358,7 +356,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst copied += retval; src += retval; dst += retval; - len -= retval; + len -= retval; } return copied; } @@ -383,7 +381,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds copied += retval; src += retval; dst += retval; - len -= retval; + len -= retval; } return copied; } @@ -496,9 +494,9 @@ static int ptrace_resume(struct task_struct *child, long request, long data) if (unlikely(!arch_has_single_step())) return -EIO; user_enable_single_step(child); - } - else + } else { user_disable_single_step(child); + } child->exit_code = data; wake_up_process(child); -- cgit v1.2.3-55-g7522 From 3217ab97f14c5c8f9f975ed8c40c351164b0b10e Mon Sep 17 00:00:00 2001 From: Vitaliy Gusev Date: Thu, 9 Apr 2009 09:50:35 -0600 Subject: kthread: Don't look for a task in create_kthread() #2 Remove the unnecessary find_task_by_pid_ns(). kthread() can just use "current" to get the same result. 
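The resulting handshake, sketched from the hunks below (setup details elided): the child publishes its own task_struct before signalling ->started, so the parent's wait_for_completion() returns with create->result already valid and no PID lookup under tasklist_lock is needed.

static int kthread(void *_create)
{
        struct kthread_create_info *create = _create;

        /* ... signal setup and threadfn/data pickup elided ... */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;       /* publish our task_struct */
        complete(&create->started);     /* wakes the creator */
        schedule();                     /* sleep until bound and woken */
        /* ... run threadfn() ... */
        return 0;
}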
Signed-off-by: Vitaliy Gusev Acked-by: Oleg Nesterov Signed-off-by: Rusty Russell --- kernel/kthread.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/kthread.c b/kernel/kthread.c index 84bbadd4d021..c013bf000ce6 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -76,6 +76,7 @@ static int kthread(void *_create) /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); + create->result = current; complete(&create->started); schedule(); @@ -101,9 +102,6 @@ static void create_kthread(struct kthread_create_info *create) } else { struct sched_param param = { .sched_priority = 0 }; wait_for_completion(&create->started); - read_lock(&tasklist_lock); - create->result = find_task_by_pid_ns(pid, &init_pid_ns); - read_unlock(&tasklist_lock); /* * root may have changed our (kthreadd's) priority or CPU mask. * The kernel thread should not inherit these properties. -- cgit v1.2.3-55-g7522 From 1c99315bb36b5d776210546d438ca928dc9b1f22 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Apr 2009 09:50:36 -0600 Subject: kthread: move sched-related initialization from kthreadd context kthreadd is the single thread which implements the "create" request; move sched_setscheduler/etc from create_kthread() to kthread_create() to improve the scalability. We should be careful with sched_setscheduler(), use the _nocheck helper. Signed-off-by: Oleg Nesterov Cc: Christoph Hellwig Cc: "Eric W. Biederman" Cc: Ingo Molnar Cc: Pavel Emelyanov Cc: Vitaliy Gusev Signed-off-by: Rusty Russell --- kernel/kthread.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/kthread.c b/kernel/kthread.c index c013bf000ce6..4ebaf8519abf 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -97,19 +97,10 @@ static void create_kthread(struct kthread_create_info *create) /* We want our own signal handler (we take no signals by default). */ pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); - if (pid < 0) { + if (pid < 0) create->result = ERR_PTR(pid); - } else { - struct sched_param param = { .sched_priority = 0 }; + else wait_for_completion(&create->started); - /* - * root may have changed our (kthreadd's) priority or CPU mask. - * The kernel thread should not inherit these properties. - */ - sched_setscheduler(create->result, SCHED_NORMAL, &param); - set_user_nice(create->result, KTHREAD_NICE_LEVEL); - set_cpus_allowed_ptr(create->result, cpu_all_mask); - } complete(&create->done); } @@ -152,11 +143,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), wait_for_completion(&create.done); if (!IS_ERR(create.result)) { + struct sched_param param = { .sched_priority = 0 }; va_list args; + va_start(args, namefmt); vsnprintf(create.result->comm, sizeof(create.result->comm), namefmt, args); va_end(args); + /* + * root may have changed our (kthreadd's) priority or CPU mask. + * The kernel thread should not inherit these properties. 
+ */ + sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param); + set_user_nice(create.result, KTHREAD_NICE_LEVEL); + set_cpus_allowed_ptr(create.result, cpu_all_mask); } return create.result; } -- cgit v1.2.3-55-g7522 From 6b44003e5ca66a3fffeb5bc90f40ada2c4340896 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Apr 2009 09:50:37 -0600 Subject: work_on_cpu(): rewrite it to create a kernel thread on demand Impact: circular locking bugfix The various implementations and proposed implementations of work_on_cpu() are vulnerable to various deadlocks because they all used queues of some form. Unrelated pieces of kernel code thus gained dependencies wherein if one work_on_cpu() caller holds a lock which some other work_on_cpu() callback also takes, the kernel could rarely deadlock. Fix this by creating a short-lived kernel thread for each work_on_cpu() invocation. This is not terribly fast, but the only current caller of work_on_cpu() is pci_call_probe(). It would be nice to find some other way of doing the node-local allocations in the PCI probe code so that we can zap work_on_cpu() altogether. The code there is rather nasty. I can't think of anything simple at this time... Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Rusty Russell --- kernel/workqueue.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b6b966ce1451..f71fb2a08950 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -966,20 +966,20 @@ undo: } #ifdef CONFIG_SMP -static struct workqueue_struct *work_on_cpu_wq __read_mostly; struct work_for_cpu { - struct work_struct work; + struct completion completion; long (*fn)(void *); void *arg; long ret; }; -static void do_work_for_cpu(struct work_struct *w) +static int do_work_for_cpu(void *_wfc) { - struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work); - + struct work_for_cpu *wfc = _wfc; wfc->ret = wfc->fn(wfc->arg); + complete(&wfc->completion); + return 0; } /** @@ -990,17 +990,23 @@ static void do_work_for_cpu(struct work_struct *w) * * This will return the value @fn returns. * It is up to the caller to ensure that the cpu doesn't go offline. + * The caller must not hold any locks which would prevent @fn from completing. 
*/ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { - struct work_for_cpu wfc; - - INIT_WORK(&wfc.work, do_work_for_cpu); - wfc.fn = fn; - wfc.arg = arg; - queue_work_on(cpu, work_on_cpu_wq, &wfc.work); - flush_work(&wfc.work); - + struct task_struct *sub_thread; + struct work_for_cpu wfc = { + .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), + .fn = fn, + .arg = arg, + }; + + sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); + if (IS_ERR(sub_thread)) + return PTR_ERR(sub_thread); + kthread_bind(sub_thread, cpu); + wake_up_process(sub_thread); + wait_for_completion(&wfc.completion); return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); @@ -1016,8 +1022,4 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); -#ifdef CONFIG_SMP - work_on_cpu_wq = create_workqueue("work_on_cpu"); - BUG_ON(!work_on_cpu_wq); -#endif } -- cgit v1.2.3-55-g7522 From 47788c58e66c050982241d9a05eb690daceb05a9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 8 Apr 2009 20:40:59 +0200 Subject: tracing/syscalls: use a dedicated file header Impact: fix build warnings and possible compat misbehavior on IA64 Building a kernel on ia64 might trigger these ugly build warnings: CC arch/ia64/ia32/sys_ia32.o In file included from arch/ia64/ia32/sys_ia32.c:55: arch/ia64/ia32/ia32priv.h:290:1: warning: "elf_check_arch" redefined In file included from include/linux/elf.h:7, from include/linux/module.h:14, from include/linux/ftrace.h:8, from include/linux/syscalls.h:68, from arch/ia64/ia32/sys_ia32.c:18: arch/ia64/include/asm/elf.h:19:1: warning: this is the location of the previous definition [...] sys_ia32.c includes linux/syscalls.h which in turn includes linux/ftrace.h to import the syscalls tracing prototypes. But including ftrace.h can pull in too many things for a low-level file, especially on ia64 where the ia32 private headers conflict with higher level headers. Now we isolate the syscall tracing headers in their own lightweight file. Reported-by: Tony Luck Tested-by: Tony Luck Signed-off-by: Frederic Weisbecker Acked-by: Tony Luck Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Jason Baron Cc: "Frank Ch. 
Eigler" Cc: Mathieu Desnoyers Cc: KOSAKI Motohiro Cc: Lai Jiangshan Cc: Jiaying Zhang Cc: Michael Rubin Cc: Martin Bligh Cc: Michael Davidson LKML-Reference: <20090408184058.GB6017@nowhere> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ftrace.c | 2 ++ arch/x86/kernel/ptrace.c | 3 ++- include/linux/ftrace.h | 29 ----------------------------- include/linux/syscalls.h | 2 +- include/trace/syscall.h | 35 +++++++++++++++++++++++++++++++++++ kernel/trace/trace_syscalls.c | 2 +- 6 files changed, 41 insertions(+), 32 deletions(-) create mode 100644 include/trace/syscall.h (limited to 'kernel') diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 70a10ca100f6..18dfa30795c9 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -18,6 +18,8 @@ #include #include +#include + #include #include #include diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index fe9345c967de..23b7c8f017e2 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -35,6 +34,8 @@ #include #include +#include + #include "tls.h" enum x86_regset { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ff112a872d75..8a0c2f221e6b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -511,33 +511,4 @@ static inline void trace_hw_branch_oops(void) {} #endif /* CONFIG_HW_BRANCH_TRACER */ -/* - * A syscall entry in the ftrace syscalls array. - * - * @name: name of the syscall - * @nb_args: number of parameters it takes - * @types: list of types as strings - * @args: list of args as strings (args[i] matches types[i]) - */ -struct syscall_metadata { - const char *name; - int nb_args; - const char **types; - const char **args; -}; - -#ifdef CONFIG_FTRACE_SYSCALLS -extern void arch_init_ftrace_syscalls(void); -extern struct syscall_metadata *syscall_nr_to_meta(int nr); -extern void start_ftrace_syscalls(void); -extern void stop_ftrace_syscalls(void); -extern void ftrace_syscall_enter(struct pt_regs *regs); -extern void ftrace_syscall_exit(struct pt_regs *regs); -#else -static inline void start_ftrace_syscalls(void) { } -static inline void stop_ftrace_syscalls(void) { } -static inline void ftrace_syscall_enter(struct pt_regs *regs) { } -static inline void ftrace_syscall_exit(struct pt_regs *regs) { } -#endif - #endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 6470f74074af..dabe4ad89141 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -65,7 +65,7 @@ struct old_linux_dirent; #include #include #include -#include +#include #define __SC_DECL1(t1, a1) t1 a1 #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) diff --git a/include/trace/syscall.h b/include/trace/syscall.h new file mode 100644 index 000000000000..8cfe515cbc47 --- /dev/null +++ b/include/trace/syscall.h @@ -0,0 +1,35 @@ +#ifndef _TRACE_SYSCALL_H +#define _TRACE_SYSCALL_H + +#include + +/* + * A syscall entry in the ftrace syscalls array. 
+ * + * @name: name of the syscall + * @nb_args: number of parameters it takes + * @types: list of types as strings + * @args: list of args as strings (args[i] matches types[i]) + */ +struct syscall_metadata { + const char *name; + int nb_args; + const char **types; + const char **args; +}; + +#ifdef CONFIG_FTRACE_SYSCALLS +extern void arch_init_ftrace_syscalls(void); +extern struct syscall_metadata *syscall_nr_to_meta(int nr); +extern void start_ftrace_syscalls(void); +extern void stop_ftrace_syscalls(void); +extern void ftrace_syscall_enter(struct pt_regs *regs); +extern void ftrace_syscall_exit(struct pt_regs *regs); +#else +static inline void start_ftrace_syscalls(void) { } +static inline void stop_ftrace_syscalls(void) { } +static inline void ftrace_syscall_enter(struct pt_regs *regs) { } +static inline void ftrace_syscall_exit(struct pt_regs *regs) { } +#endif + +#endif /* _TRACE_SYSCALL_H */ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index a2a3af29c943..5e579645ac86 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -1,5 +1,5 @@ +#include #include -#include #include #include "trace_output.h" -- cgit v1.2.3-55-g7522 From 9eb85125ce218a8b8d9a7c982510388e227adbec Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 9 Apr 2009 11:19:40 +0800 Subject: blktrace: pass the right pointer to kfree() Impact: fix kfree crash with non-standard act_mask string If passing a string with leading white spaces to strstrip(), the returned ptr != the original ptr. This bug was introduced by me. Signed-off-by: Li Zefan Cc: Jens Axboe Cc: Arnaldo Carvalho de Melo LKML-Reference: <49DD694C.8020902@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/blktrace.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b32ff446c3fb..921ef5d1f0ba 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1377,12 +1377,12 @@ static int blk_trace_str2mask(const char *str) { int i; int mask = 0; - char *s, *token; + char *buf, *s, *token; - s = kstrdup(str, GFP_KERNEL); - if (s == NULL) + buf = kstrdup(str, GFP_KERNEL); + if (buf == NULL) return -ENOMEM; - s = strstrip(s); + s = strstrip(buf); while (1) { token = strsep(&s, ","); @@ -1403,7 +1403,7 @@ static int blk_trace_str2mask(const char *str) break; } } - kfree(s); + kfree(buf); return mask; } -- cgit v1.2.3-55-g7522 From 36cd3c9f925b9307236505ae7ad1ad7ac4d4357c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 9 Apr 2009 18:48:34 +0200 Subject: mutex: have non-spinning mutexes on s390 by default Impact: performance regression fix for s390 The adaptive spinning mutexes will not always do what one would expect on virtualized architectures like s390. Especially the cpu_relax() loop in mutex_spin_on_owner might hurt if the mutex holding cpu has been scheduled away by the hypervisor. We would end up in a cpu_relax() loop when there is no chance that the state of the mutex changes until the target cpu has been scheduled again by the hypervisor. For that reason we should change the default behaviour to no-spin on s390. We do have an instruction which allows to yield the current cpu in favour of a different target cpu. Also we have an instruction which allows us to figure out if the target cpu is physically backed. However we need to do some performance tests until we can come up with a solution that will do the right thing on s390. 
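Roughly, the spin being disabled on s390 looks like this (a loose sketch of the loop in __mutex_lock_common(); lock_owner_is_running() and try_acquire() are hypothetical stand-ins for the real checks):

while (lock_owner_is_running(lock)) {   /* hypothetical helper */
        if (try_acquire(lock))          /* hypothetical helper */
                return 0;               /* owner released it while we spun */
        /*
         * On bare metal the owner really is executing and will drop
         * the lock soon.  Under a hypervisor, the owner's physical
         * CPU may itself be preempted, so this can spin until the
         * hypervisor happens to schedule the owner again.
         */
        cpu_relax();
}
/* otherwise fall back to the blocking slow path */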
Signed-off-by: Heiko Carstens Acked-by: Peter Zijlstra Cc: Martin Schwidefsky Cc: Christian Borntraeger LKML-Reference: <20090409184834.7a0df7b2@osiris.boeblingen.de.ibm.com> Signed-off-by: Ingo Molnar --- arch/Kconfig | 3 +++ arch/s390/Kconfig | 1 + kernel/mutex.c | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/Kconfig b/arch/Kconfig index dc81b34c5d82..78a35e9dc104 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -109,3 +109,6 @@ config HAVE_CLK config HAVE_DMA_API_DEBUG bool + +config HAVE_DEFAULT_NO_SPIN_MUTEXES + bool diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index dcb667c4375a..2eca5fe0e75b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -82,6 +82,7 @@ config S390 select USE_GENERIC_SMP_HELPERS if SMP select HAVE_SYSCALL_WRAPPERS select HAVE_FUNCTION_TRACER + select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_OPROFILE select HAVE_KPROBES select HAVE_KRETPROBES diff --git a/kernel/mutex.c b/kernel/mutex.c index 5d79781394a3..507cf2b5e9f1 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -148,7 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, preempt_disable(); mutex_acquire(&lock->dep_map, subclass, 0, ip); -#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ + !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) /* * Optimistic spinning. * -- cgit v1.2.3-55-g7522 From d1e7e02f30be672c6f6ee40908be83877a0d49d1 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:16:56 +0800 Subject: tracing: disable seeking for trace_pipe_raw Impact: disable pread() We set tracing_buffers_fops.llseek to no_llseek, but we can still perform pread() to read this file. That is not expected. This fix uses nonseekable_open() to disable it. tracing_buffers_fops.llseek is still set to no_llseek; it marks this file as a "non-seekable device" and is used by sys_splice(). See also do_splice() or the manual of splice(2): ERRORS EINVAL Target file system doesn't support splicing; neither of the descriptors refers to a pipe; or offset given for non-seekable device. Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D46668.8030806@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9d28476a9851..24b0168f1a29 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3285,7 +3285,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) filp->private_data = info; - return 0; + return nonseekable_open(inode, filp); out: kfree(info); -- cgit v1.2.3-55-g7522 From ddd538f3e6a1a4bec2f6942f83a753263e6577b4 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:16:59 +0800 Subject: tracing: allocate page when needed Impact: Cleanup Sometimes, we open trace_pipe_raw, but we don't read(2) it, we just splice(2) it; thus the page is not used. 
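For reference, a splice(2)-only consumer might look like this (hypothetical userspace sketch; the file path is abbreviated and depends on where the tracing debugfs is mounted). Such a reader never calls read(2), so with this change no spare page is allocated for it at all:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int pfd[2];
        int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);

        if (fd < 0 || pipe(pfd) < 0)
                return 1;
        /* move one page of ring-buffer data without ever read(2)-ing */
        splice(fd, NULL, pfd[1], NULL, 4096, 0);
        return 0;
}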
Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D4666B.4010608@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 24b0168f1a29..8e189ffb899a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3277,19 +3277,13 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) info->tr = &global_trace; info->cpu = cpu; - info->spare = ring_buffer_alloc_read_page(info->tr->buffer); + info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; - if (!info->spare) - goto out; filp->private_data = info; return nonseekable_open(inode, filp); - - out: - kfree(info); - return -ENOMEM; } static ssize_t @@ -3304,6 +3298,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, if (!count) return 0; + if (!info->spare) + info->spare = ring_buffer_alloc_read_page(info->tr->buffer); + if (!info->spare) + return -ENOMEM; + /* Do we have previous read data to read? */ if (info->read < PAGE_SIZE) goto read; @@ -3342,7 +3341,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; - ring_buffer_free_read_page(info->tr->buffer, info->spare); + if (info->spare) + ring_buffer_free_read_page(info->tr->buffer, info->spare); kfree(info); return 0; -- cgit v1.2.3-55-g7522 From c7625a555f55d7ae49236cde551786c88f5a5ce1 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:17:04 +0800 Subject: tracing: update file->f_pos when splicing it Impact: Cleanup These two lines: if (unlikely(*ppos)) return -ESPIPE; in tracing_buffers_splice_read() are not needed; the VFS layer has disabled seek(2). We remove these two lines, and then we can update file->f_pos. And since tracing_buffers_read() updates file->f_pos, this fix makes tracing_buffers_splice_read() update file->f_pos too. Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D46670.4010503@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8e189ffb899a..94629760dabf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3428,13 +3428,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, int size, i; size_t ret; - /* - * We can't seek on a buffer input - */ - if (unlikely(*ppos)) - return -ESPIPE; - - for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { struct page *page; int r; @@ -3474,6 +3467,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; + *ppos += size; } spd.nr_pages = i; -- cgit v1.2.3-55-g7522 From 93cfb3c9fd83d877a8f1ffad9ff862b617b32828 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:17:08 +0800 Subject: tracing: fix splice return too large I got these from strace: splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 16384 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192 I wanted to splice_read 4096 bytes, but it returns 8192 or larger. 
It is because the return value of tracing_buffers_splice_read() does not include "zero out any left over data" bytes. But tracing_buffers_read() includes these bytes, so we make them consistent. Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D46674.9030804@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 94629760dabf..1ce5dc6372b8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3428,7 +3428,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, int size, i; size_t ret; - for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { + if (*ppos & (PAGE_SIZE - 1)) { + WARN_ONCE(1, "Ftrace: previous read must page-align\n"); + return -EINVAL; + } + + if (len & (PAGE_SIZE - 1)) { + WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); + if (len < PAGE_SIZE) + return -EINVAL; + len &= PAGE_MASK; + } + + for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) { struct page *page; int r; @@ -3467,7 +3479,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; - *ppos += size; + *ppos += PAGE_SIZE; } spd.nr_pages = i; -- cgit v1.2.3-55-g7522 From 4d1f4372dbea068ba4ee3d98231133a4a4ee15bd Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 10 Apr 2009 08:48:36 +0800 Subject: tracing: fix document references When moving documents to Documentation/trace/, I forgot to grep Kconfig to find out those references. Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Pekka Enberg Cc: Pekka Paalanen Cc: eduard.munteanu@linux360.ro LKML-Reference: <49DE97EF.7080208@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2246141bda4d..417d1985e299 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -312,7 +312,7 @@ config KMEMTRACE and profile kernel code. This requires an userspace application to use. See - Documentation/vm/kmemtrace.txt for more information. + Documentation/trace/kmemtrace.txt for more information. Saying Y will make the kernel somewhat larger and slower. However, if you disable kmemtrace at run-time or boot-time, the performance @@ -403,7 +403,7 @@ config MMIOTRACE implementation and works via page faults. Tracing is disabled by default and can be enabled at run-time. - See Documentation/tracers/mmiotrace.txt. + See Documentation/trace/mmiotrace.txt. If you are not helping to develop drivers, say N. config MMIOTRACE_TEST -- cgit v1.2.3-55-g7522 From 0462b5664b2bda5a18fef7efb5bb32ce36590c1a Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Wed, 8 Apr 2009 17:00:13 +0800 Subject: ftrace: Output REC->var instead of __entry->var for trace format print fmt: "irq=%d return=%s", __entry->irq, __entry->ret ? \"handled\" : \"unhandled\" "__entry" should be converted to "REC" by the __stringify() macro. 
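The distinction, for reference (definitions as in include/linux/stringify.h): plain # stringifies tokens before macro expansion, while the extra indirection in __stringify() expands them first, letting the "#define __entry REC" substitution take effect.

#define __stringify_1(x...)     #x
#define __stringify(x...)       __stringify_1(x)
#define __entry                 REC

/* #args             -> "__entry->irq"  (macro name survives, wrong)  */
/* __stringify(args) -> "REC->irq"      (expanded first, as intended) */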
Signed-off-by: Zhao Lei Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49DC679D.2090901@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_stage_2.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h index 30743f7d4110..d363c6672c6c 100644 --- a/kernel/trace/trace_events_stage_2.h +++ b/kernel/trace/trace_events_stage_2.h @@ -105,10 +105,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ return 0; #undef __entry -#define __entry "REC" +#define __entry REC #undef TP_printk -#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args +#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) #undef TP_fast_assign #define TP_fast_assign(args...) args -- cgit v1.2.3-55-g7522 From d6de2c80e9d758d2e36c21699117db6178c0f517 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 10 Apr 2009 12:17:41 -0700 Subject: async: Fix module loading async-work regression Several drivers use asynchronous work to do device discovery, and we synchronize with them in the compiled-in case before we actually try to mount root filesystems etc. However, when compiled as modules, that synchronization is missing - the module loading completes, but the driver hasn't actually finished probing for devices, and that means that any user mode that expects to use the devices after the 'insmod' is now potentially broken. We already saw one case of a similar issue in the ACPI battery code, where the kernel itself expected the module to be all done, and unmapped the init memory - but the async device discovery was still running. That got hacked around by just removing the "__init" (see commit 5d38258ec026921a7b266f4047ebeaa75db358e5 "ACPI battery: fix async boot oops"), but the real fix is to just make the module loading wait for all async work to be completed. It will slow down module loading, but since common devices should be built in anyway, and since the bug is really annoying and hard to handle from user space (and caused several S3 resume regressions), the simple fix to wait is the right one. This fixes at least http://bugzilla.kernel.org/show_bug.cgi?id=13063 but probably a few other bugzilla entries too (12936, for example), and is confirmed to fix Rafael's storage driver breakage after resume bug report (no bugzilla entry). We should also be able to now revert that ACPI battery fix. Reported-and-tested-by: Rafael J. Wysocki Tested-by: Heinz Diehl Acked-by: Arjan van de Ven Signed-off-by: Linus Torvalds --- kernel/module.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 05f014efa32c..e797812a4d95 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2388,6 +2388,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_LIVE, mod); + /* We need to finish all async code before the module init sequence is done */ + async_synchronize_full(); + mutex_lock(&module_mutex); /* Drop initial reference. */ module_put(mod); -- cgit v1.2.3-55-g7522 From 8433a40eb7f2c4883ad57f9900f63e4d59240eb7 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:52:18 +0800 Subject: tracing/filters: NIL-terminate user input filter Make sure messages from user space are NIL-terminated strings, otherwise we could dump random memory while reading filter file. 
Try this: # echo 'parent_comm ==' > events/sched/sched_process_fork/filter # cat events/sched/sched_process_fork/filter parent_comm == � Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04C32.6060508@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 64ec4d278ffb..054bc1802bcd 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -503,6 +503,7 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; + buf[cnt] = '\0'; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) @@ -569,6 +570,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; + buf[cnt] = '\0'; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) -- cgit v1.2.3-55-g7522 From bcabd91c271e50eebc0cb9220ac92700332b452e Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:52:35 +0800 Subject: tracing/filters: fix NULL pointer dereference Try this, and you'll see a NULL pointer dereference bug: # echo -n 'parent_comm ==' > sched/sched_process_fork/filter This happens because we passed a NULL ptr to simple_strtoull(). Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04C43.1050504@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_filter.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 026be412f356..9d2162fd2305 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -410,6 +410,11 @@ int filter_parse(char **pbuf, struct filter_pred *pred) } } + if (!val_str) { + pred->field_name = NULL; + return -EINVAL; + } + pred->field_name = kstrdup(pred->field_name, GFP_KERNEL); if (!pred->field_name) return -ENOMEM; -- cgit v1.2.3-55-g7522 From a3e0ab050774117d4a6173087c8bf3888662a83f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:52:51 +0800 Subject: tracing/filters: allow user-input integers to be oct or hex Before the patch: # echo 'parent_pid == 0x10' > events/sched/sched_process_fork/filter # cat sched/sched_process_fork/filter parent_pid == 0 After the patch: # cat sched/sched_process_fork/filter parent_pid == 16 Also check the input more strictly. 
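Both behaviours come from the base argument of simple_strtoull(): base 0 auto-detects the radix, and checking the end pointer rejects trailing junk. A sketch of the parse (matching the hunk below):

pred->val = simple_strtoull(val_str, &tmp, 0);  /* accepts "16", "0x10", "010" */
if (tmp == val_str) {
        /* no digits at all: treat the value as a string predicate */
} else if (*tmp != '\0') {
        return -EINVAL;         /* digits followed by junk, e.g. "16abc" */
}
/* base 0: "0x10" -> 16, "010" -> 8, "16" -> 16 */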
Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04C53.4010600@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_filter.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 9d2162fd2305..49b3ef54ec46 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -419,12 +419,13 @@ int filter_parse(char **pbuf, struct filter_pred *pred) if (!pred->field_name) return -ENOMEM; - pred->val = simple_strtoull(val_str, &tmp, 10); + pred->val = simple_strtoull(val_str, &tmp, 0); if (tmp == val_str) { pred->str_val = kstrdup(val_str, GFP_KERNEL); if (!pred->str_val) return -ENOMEM; - } + } else if (*tmp != '\0') + return -EINVAL; return 0; } -- cgit v1.2.3-55-g7522 From 44e9c8b7adc52079f0535f9de0c2c2477831389b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:55:28 +0800 Subject: tracing/filters: return proper error code when writing filter file - propagate the return value of filter_add_pred() to the user - return -ENOSPC rather than -ENOMEM or -EINVAL when the filter array is full Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04CF0.3010105@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events.c | 10 ++++++---- kernel/trace/trace_events_filter.c | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 054bc1802bcd..576f4fa2af0d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -521,9 +521,10 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, return cnt; } - if (filter_add_pred(call, pred)) { + err = filter_add_pred(call, pred); + if (err < 0) { filter_free_pred(pred); - return -EINVAL; + return err; } *ppos += cnt; @@ -588,10 +589,11 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, return cnt; } - if (filter_add_subsystem_pred(system, pred)) { + err = filter_add_subsystem_pred(system, pred); + if (err < 0) { filter_free_subsystem_preds(system); filter_free_pred(pred); - return -EINVAL; + return err; } *ppos += cnt; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 49b3ef54ec46..e03cbf1e38f3 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -215,7 +215,7 @@ static int __filter_add_pred(struct ftrace_event_call *call, } } - return -ENOMEM; + return -ENOSPC; } static int is_string_field(const char *type) @@ -319,7 +319,7 @@ int filter_add_subsystem_pred(struct event_subsystem *system, } if (i == MAX_FILTER_PRED) - return -EINVAL; + return -ENOSPC; events_for_each(call) { int err; -- cgit v1.2.3-55-g7522 From 9eeba6138cefc0435695463ddadb0d95e0a6bcd2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 11 Apr 2009 03:17:17 +0200 Subject: lockdep: warn about lockdep disabling after kernel taint Impact: provide useful missing info for developers Kernel taint can occur in several situations such as warnings, loading of proprietary or staging modules, bad pages, etc. But when such a taint happens, a developer might still be working on the kernel, expecting that lockdep is still enabled. But a taint disables lockdep without ever warning about it.
Such kernel behaviour doesn't really help kernel development. This patch adds the missing warning. Since the taint is usually set after the main message that explains the real source of the issue, it seems safe to warn about it inside add_taint() so that the warning appears last, without obscuring the main information. v2: Use a generic helper to disable lockdep instead of an open coded xchg(). Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra LKML-Reference: <1239412638-6739-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/debug_locks.h | 7 +++++++ kernel/panic.c | 10 ++++++++-- lib/debug_locks.c | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 096476f1fb35..493dedb7a67b 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -2,12 +2,19 @@ #define __LINUX_DEBUG_LOCKING_H #include +#include struct task_struct; extern int debug_locks; extern int debug_locks_silent; + +static inline int __debug_locks_off(void) +{ + return xchg(&debug_locks, 0); +} + /* * Generic 'turn off all lock debugging' function: */ diff --git a/kernel/panic.c b/kernel/panic.c index 3fd8c5bf8b39..940ca14f6dbf 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -213,8 +213,14 @@ unsigned long get_taint(void) void add_taint(unsigned flag) { - /* can't trust the integrity of the kernel anymore: */ - debug_locks = 0; + /* + * Can't trust the integrity of the kernel anymore. + * We don't call directly debug_locks_off() because the issue + * is not necessarily serious enough to set oops_in_progress to 1 + */ + if (__debug_locks_off()) + printk(KERN_WARNING "Disabling lockdep due to kernel taint\n"); + set_bit(flag, &tainted_mask); } EXPORT_SYMBOL(add_taint); diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 0218b4693dd8..bc3b11731b9c 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -36,7 +36,7 @@ int debug_locks_silent; */ int debug_locks_off(void) { - if (xchg(&debug_locks, 0)) { + if (__debug_locks_off()) { if (!debug_locks_silent) { oops_in_progress = 1; console_verbose(); -- cgit v1.2.3-55-g7522 From 574bbe782057fdf0490dc7dec906a2dc26363e20 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 11 Apr 2009 03:17:18 +0200 Subject: lockdep: continue lock debugging despite some taints Impact: broaden lockdep checks Lockdep is disabled after any kernel taint. This is convenient for ignoring bad locking issues whose sources come from outside the kernel tree. Nevertheless, it can be a frustrating experience for staging developers, or for those who hit a warning but are focused on other things that require lockdep. v2 of this patch simply no longer disables lockdep for TAINT_CRAP and TAINT_WARN events. Signed-off-by: Frederic Weisbecker Cc: LTP Cc: Peter Zijlstra Cc: Greg KH LKML-Reference: <1239412638-6739-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/panic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 940ca14f6dbf..934fb377f4b3 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -217,8 +217,10 @@ void add_taint(unsigned flag) * Can't trust the integrity of the kernel anymore. * We don't call directly debug_locks_off() because the issue * is not necessarily serious enough to set oops_in_progress to 1 + * Also we want to keep up lockdep for staging development and + * post-warning case.
*/ - if (__debug_locks_off()) + if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off()) printk(KERN_WARNING "Disabling lockdep due to kernel taint\n"); set_bit(flag, &tainted_mask); -- cgit v1.2.3-55-g7522 From c751085943362143f84346d274e0011419c84202 Mon Sep 17 00:00:00 2001 From: Rafael J. Wysocki Date: Sun, 12 Apr 2009 20:06:56 +0200 Subject: PM/Hibernate: Wait for SCSI devices scan to complete during resume There is a race between resume from hibernation and the asynchronous scanning of SCSI devices and to prevent it from happening we need to call scsi_complete_async_scans() during resume from hibernation. In addition, if the resume from hibernation is userland-driven, it's better to wait for all device probes in the kernel to complete before attempting to open the resume device. Signed-off-by: Rafael J. Wysocki Acked-by: Arjan van de Ven Signed-off-by: Linus Torvalds --- drivers/scsi/scsi_priv.h | 3 --- drivers/scsi/scsi_wait_scan.c | 2 +- include/scsi/scsi_scan.h | 11 +++++++++++ kernel/power/disk.c | 8 ++++++++ kernel/power/user.c | 9 +++++++++ 5 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 include/scsi/scsi_scan.h (limited to 'kernel') diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index e1850904ff73..fbc83bebdd8e 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -38,9 +38,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) { }; #endif -/* scsi_scan.c */ -int scsi_complete_async_scans(void); - /* scsi_devinfo.c */ extern int scsi_get_device_flags(struct scsi_device *sdev, const unsigned char *vendor, diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c index 8a636103083d..2f21af21269a 100644 --- a/drivers/scsi/scsi_wait_scan.c +++ b/drivers/scsi/scsi_wait_scan.c @@ -11,7 +11,7 @@ */ #include -#include "scsi_priv.h" +#include static int __init wait_scan_init(void) { diff --git a/include/scsi/scsi_scan.h b/include/scsi/scsi_scan.h new file mode 100644 index 000000000000..78898889243d --- /dev/null +++ b/include/scsi/scsi_scan.h @@ -0,0 +1,11 @@ +#ifndef _SCSI_SCSI_SCAN_H +#define _SCSI_SCSI_SCAN_H + +#ifdef CONFIG_SCSI +/* drivers/scsi/scsi_scan.c */ +extern int scsi_complete_async_scans(void); +#else +static inline int scsi_complete_async_scans(void) { return 0; } +#endif + +#endif /* _SCSI_SCSI_SCAN_H */ diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 5f21ab2bbcdf..0854770b63b9 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "power.h" @@ -644,6 +645,13 @@ static int software_resume(void) if (noresume) return 0; + /* + * We can't depend on SCSI devices being available after loading one of + * their modules if scsi_complete_async_scans() is not called and the + * resume device usually is a SCSI one. + */ + scsi_complete_async_scans(); + /* * name_to_dev_t() below takes a sysfs buffer mutex when sysfs * is configured into the kernel. Since the regular hibernate diff --git a/kernel/power/user.c b/kernel/power/user.c index 6c85359364f2..ed97375daae9 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -92,6 +93,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) filp->private_data = data; memset(&data->handle, 0, sizeof(struct snapshot_handle)); if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { + /* Hibernating. The image device should be accessible. 
*/ data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device, 0, NULL) : -1; data->mode = O_RDONLY; @@ -99,6 +101,13 @@ static int snapshot_open(struct inode *inode, struct file *filp) if (error) pm_notifier_call_chain(PM_POST_HIBERNATION); } else { + /* + * Resuming. We may need to wait for the image device to + * appear. + */ + wait_for_device_probe(); + scsi_complete_async_scans(); + data->swap = -1; data->mode = O_WRONLY; error = pm_notifier_call_chain(PM_RESTORE_PREPARE); -- cgit v1.2.3-55-g7522 From a3d03ecaf9fe722bf96e4ef4a2f5e42ef652ddeb Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Mon, 13 Apr 2009 15:23:53 +0800 Subject: tracing: Fix power tracer header Before patch: # tracer: power # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | [ 676.875865889] CSTATE: Going to C1 on cpu 0 for 0.005911463 [ 676.882938805] CSTATE: Going to C1 on cpu 0 for 0.104796532 ... After patch: # tracer: power # # TIMESTAMP STATE EVENT # | | | [ 676.875865889] CSTATE: Going to C1 on cpu 0 for 0.005911463 [ 676.882938805] CSTATE: Going to C1 on cpu 0 for 0.104796532 ... v2: Use seq_puts instead of seq_printf Signed-off-by: Zhao Lei Cc: Arjan van de Ven Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Tom Zanussi LKML-Reference: <49E2E889.5000903@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_power.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index bae791ebcc51..118439709fb7 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c @@ -186,6 +186,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter) return TRACE_TYPE_UNHANDLED; } +static void power_print_header(struct seq_file *s) +{ + seq_puts(s, "# TIMESTAMP STATE EVENT\n"); + seq_puts(s, "# | | |\n"); +} + static struct tracer power_tracer __read_mostly = { .name = "power", @@ -194,6 +200,7 @@ static struct tracer power_tracer __read_mostly = .stop = stop_power_trace, .reset = power_trace_reset, .print_line = power_print_line, + .print_header = power_print_header, }; static int init_power_trace(void) -- cgit v1.2.3-55-g7522 From 4be6f6bb66111c9468733a4ed9cad10dc3a762c0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 13 Apr 2009 14:39:33 -0700 Subject: mm: move the scan_unevictable_pages sysctl to the vm table vm knobs should go in the vm table. Probably too late for randomize_va_space though. 
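For context, such a knob is a plain ctl_table entry, and moving it means relocating the entry from kern_table[] to vm_table[]. The shape of an entry in the unnumbered-sysctl style of the time looks roughly like this (the knob name and backing variable below are made up for illustration):

static int example_vm_knob;

static struct ctl_table example_vm_table[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "example_vm_knob",
                .data           = &example_vm_knob,
                .maxlen         = sizeof(example_vm_knob),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }       /* table terminator */
};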
Signed-off-by: Peter Zijlstra Acked-by: Lee Schermerhorn Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4286b62b34a0..e3d2c7dd59b9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -902,16 +902,6 @@ static struct ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, #endif -#ifdef CONFIG_UNEVICTABLE_LRU - { - .ctl_name = CTL_UNNUMBERED, - .procname = "scan_unevictable_pages", - .data = &scan_unevictable_pages, - .maxlen = sizeof(scan_unevictable_pages), - .mode = 0644, - .proc_handler = &scan_unevictable_handler, - }, -#endif #ifdef CONFIG_SLOW_WORK { .ctl_name = CTL_UNNUMBERED, @@ -1302,6 +1292,16 @@ static struct ctl_table vm_table[] = { .extra2 = &one, }, #endif +#ifdef CONFIG_UNEVICTABLE_LRU + { + .ctl_name = CTL_UNNUMBERED, + .procname = "scan_unevictable_pages", + .data = &scan_unevictable_pages, + .maxlen = sizeof(scan_unevictable_pages), + .mode = 0644, + .proc_handler = &scan_unevictable_handler, + }, +#endif /* * NOTE: do not add new entries to this table unless you have read * Documentation/sysctl/ctl_unnumbered.txt -- cgit v1.2.3-55-g7522 From f1671f6d783a2385d32e11f456cbe32f0e4b4b49 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 13 Apr 2009 14:40:03 -0700 Subject: ptrace: fix exit_ptrace() vs ptrace_traceme() race Pointed out by Roland. The bug was recently introduced by me in "forget_original_parent: split out the un-ptrace part", commit 39c626ae47c469abdfd30c6e42eff884931380d6. Since that patch we have a window after exit_ptrace() drops tasklist and before forget_original_parent() takes it again. In this window the child can do ptrace(PTRACE_TRACEME) and nobody can untrace this child after that. Change ptrace_traceme() to not attach to the exiting ->real_parent. We don't report the error in this case, we pretend we attach right before ->real_parent calls exit_ptrace() which should untrace us anyway. Signed-off-by: Oleg Nesterov Acked-by: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/ptrace.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 64191fa09b7e..dfcd83ceee3b 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -604,10 +604,11 @@ repeat: ret = security_ptrace_traceme(current->parent); /* - * Set the ptrace bit in the process ptrace flags. - * Then link us on our parent's ptraced list. + * Check PF_EXITING to ensure ->real_parent has not passed + * exit_ptrace(). Otherwise we don't report the error but + * pretend ->real_parent untraces us right after return. */ - if (!ret) { + if (!ret && !(current->real_parent->flags & PF_EXITING)) { current->ptrace |= PT_PTRACED; __ptrace_link(current, current->real_parent); } -- cgit v1.2.3-55-g7522 From 3d26dcf7679c5cc6c9f3b95ffdb2152fba2b7fae Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 13 Apr 2009 14:40:08 -0700 Subject: kernel/sys.c: clean up sys_shutdown exit path Impact: cleanup, fix Clean up sys_shutdown() exit path. Factor out common code. Return correct error code instead of always 0 on failure. 
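The cleanup applies the usual single-exit idiom: accumulate the status in one ret variable and funnel all cases through a single unlock/return. A condensed sketch of the pattern (do_foo() and do_bar() are placeholders, not the actual reboot helpers):

extern int do_foo(void);
extern int do_bar(void);

static long example_cmd(unsigned int cmd)
{
        int ret = 0;

        lock_kernel();
        switch (cmd) {
        case 0:
                ret = do_foo();         /* propagated, not squashed to 0 */
                break;
        case 1:
                ret = do_bar();
                break;
        default:
                ret = -EINVAL;
                break;
        }
        unlock_kernel();                /* one unlock, one return */

        return ret;
}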
Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index 51dbb55604e8..e7998cf31498 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -360,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg) { char buffer[256]; + int ret = 0; /* We only trust the superuser with rebooting the system. */ if (!capable(CAP_SYS_BOOT)) @@ -397,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, kernel_halt(); unlock_kernel(); do_exit(0); - break; + panic("cannot halt"); case LINUX_REBOOT_CMD_POWER_OFF: kernel_power_off(); @@ -417,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, #ifdef CONFIG_KEXEC case LINUX_REBOOT_CMD_KEXEC: - { - int ret; - ret = kernel_kexec(); - unlock_kernel(); - return ret; - } + ret = kernel_kexec(); + break; #endif #ifdef CONFIG_HIBERNATION case LINUX_REBOOT_CMD_SW_SUSPEND: - { - int ret = hibernate(); - unlock_kernel(); - return ret; - } + ret = hibernate(); + break; #endif default: - unlock_kernel(); - return -EINVAL; + ret = -EINVAL; + break; } unlock_kernel(); - return 0; + return ret; } static void deferred_cad(struct work_struct *dummy) -- cgit v1.2.3-55-g7522 From 132380a06b24704fd6c9be55c44d4ef3972cead2 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 14:18:25 +0800 Subject: tracing, sched: mark get_parent_ip() notrace Impact: remove overly redundant tracing entries When tracer is "function" or "function_graph", way too much "get_parent_ip" entries are recorded in ring_buffer. Signed-off-by: Lai Jiangshan Acked-by: Frederic Weisbecker Acked-by: Steven Rostedt LKML-Reference: <49D458B1.5000703@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 5724508c3b66..e90e70ed36a3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4846,7 +4846,7 @@ void scheduler_tick(void) #endif } -unsigned long get_parent_ip(unsigned long addr) +notrace unsigned long get_parent_ip(unsigned long addr) { if (in_lock_functions(addr)) { addr = CALLER_ADDR2; -- cgit v1.2.3-55-g7522 From 557055bebe9212dfa6b9f5df811dfd0dac77ec55 Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Mon, 13 Apr 2009 16:02:34 +0800 Subject: tracing: Fix branch tracer header Before patch: # tracer: branch # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | <...>-2981 [000] 24008.872738: [ ok ] trace_irq_handler_exit:irq_event_types.h:41 <...>-2981 [000] 24008.872742: [ ok ] note_interrupt:spurious.c:229 ... After patch: # tracer: branch # # TASK-PID CPU# TIMESTAMP CORRECT FUNC:FILE:LINE # | | | | | | <...>-2985 [000] 26329.142970: [ ok ] slab_free:slub.c:1776 <...>-2985 [000] 26329.142972: [ ok ] trace_kmem_cache_free:kmem_event_types.h:191 ... 
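This uses the same hook as the power tracer fix above: the tracer supplies a .print_header callback that the core invokes when emitting the trace file preamble. A minimal sketch of the wiring (names and columns are illustrative; the real callbacks are in the diffs):

static void example_print_header(struct seq_file *s)
{
        seq_puts(s, "#   TIMESTAMP    FIELD-A  FIELD-B\n");
        seq_puts(s, "#       |           |        |\n");
}

static struct tracer example_tracer __read_mostly = {
        .name           = "example",
        .print_header   = example_print_header,
        /* .start, .stop, .reset and friends omitted in this sketch */
};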
Signed-off-by: Zhao Lei Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <49E2F19A.3040006@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_branch.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index ad8c22efff41..8333715e4066 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -155,6 +155,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, return TRACE_TYPE_HANDLED; } +static void branch_print_header(struct seq_file *s) +{ + seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" + " FUNC:FILE:LINE\n"); + seq_puts(s, "# | | | | | " + " |\n"); +} static struct trace_event trace_branch_event = { .type = TRACE_BRANCH, @@ -169,6 +176,7 @@ static struct tracer branch_trace __read_mostly = #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_branch, #endif /* CONFIG_FTRACE_SELFTEST */ + .print_header = branch_print_header, }; __init static int init_branch_tracer(void) -- cgit v1.2.3-55-g7522 From ef631b0ca01655d24e9ca7e199262c4a46416a26 Mon Sep 17 00:00:00 2001 From: Paul E. McKenney Date: Mon, 13 Apr 2009 21:31:16 -0700 Subject: rcu: Make hierarchical RCU less IPI-happy This patch fixes a hierarchical-RCU performance bug located by Anton Blanchard. The problem stems from a misguided attempt to provide a work-around for jiffies-counter failure. This work-around uses a per-CPU n_rcu_pending counter, which is incremented on each call to rcu_pending(), which in turn is called from each scheduling-clock interrupt. Each CPU then treats this counter as a surrogate for the jiffies counter, so that if the jiffies counter fails to advance, the per-CPU n_rcu_pending counter will cause RCU to invoke force_quiescent_state(), which in turn will (among other things) send resched IPIs to CPUs that have thus far failed to pass through an RCU quiescent state. Unfortunately, each CPU resets only its own counter after sending a batch of IPIs. This means that the other CPUs will also (needlessly) send -another- round of IPIs, for a full N-squared set of IPIs in the worst case every three scheduler-clock ticks until the grace period finally ends. It is not reasonable for a given CPU to reset each and every n_rcu_pending for all the other CPUs, so this patch instead simply disables the jiffies-counter "training wheels", thus eliminating the excessive IPIs. Note that the jiffies-counter IPIs do not have this problem due to the fact that the jiffies counter is global, so that the CPU sending the IPIs can easily reset things, thus preventing the other CPUs from sending redundant IPIs. Note also that the n_rcu_pending counter remains, as it will continue to be used for tracing. It may also see use to update the jiffies counter, should an appropriate kick-the-jiffies-counter API appear. Located-by: Anton Blanchard Tested-by: Anton Blanchard Signed-off-by: Paul E. McKenney Cc: anton@samba.org Cc: akpm@linux-foundation.org Cc: dipankar@in.ibm.com Cc: manfred@colorfullife.com Cc: cl@linux-foundation.org Cc: josht@linux.vnet.ibm.com Cc: schamp@sgi.com Cc: niv@us.ibm.com Cc: dvhltc@us.ibm.com Cc: ego@in.ibm.com Cc: laijs@cn.fujitsu.com Cc: rostedt@goodmis.org Cc: peterz@infradead.org Cc: penberg@cs.helsinki.fi Cc: andi@firstfloor.org Cc: "Paul E. 
McKenney" LKML-Reference: <12396834793575-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/rcutree.h | 3 +-- kernel/rcutree.c | 19 ++++--------------- kernel/rcutree_trace.c | 14 +++++--------- 3 files changed, 10 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 0cdda00f2b2a..58b2aa5312b9 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -161,9 +161,8 @@ struct rcu_data { unsigned long offline_fqs; /* Kicked due to being offline. */ unsigned long resched_ipi; /* Sent a resched IPI. */ - /* 5) state to allow this CPU to force_quiescent_state on others */ + /* 5) For future __rcu_pending statistics. */ long n_rcu_pending; /* rcu_pending() calls since boot. */ - long n_rcu_pending_force_qs; /* when to force quiescent states. */ int cpu; }; diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 7f3266922572..d2a372fb0b9b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) rdp->qs_pending = 1; rdp->passed_quiesc = 0; rdp->gpnum = rsp->gpnum; - rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + - RCU_JIFFIES_TILL_FORCE_QS; } /* @@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) rsp->gpnum++; rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; - rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + - RCU_JIFFIES_TILL_FORCE_QS; record_gp_stall_check_time(rsp); dyntick_record_completed(rsp, rsp->completed - 1); note_new_gpnum(rsp, rdp); @@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) { unsigned long flags; long lastcomp; - struct rcu_data *rdp = rsp->rda[smp_processor_id()]; struct rcu_node *rnp = rcu_get_root(rsp); u8 signaled; @@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) return; /* Someone else is already on the job. */ } if (relaxed && - (long)(rsp->jiffies_force_qs - jiffies) >= 0 && - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0) + (long)(rsp->jiffies_force_qs - jiffies) >= 0) goto unlock_ret; /* no emergency and done recently. */ rsp->n_force_qs++; spin_lock(&rnp->lock); lastcomp = rsp->completed; signaled = rsp->signaled; rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; - rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + - RCU_JIFFIES_TILL_FORCE_QS; if (lastcomp == rsp->gpnum) { rsp->n_force_qs_ngp++; spin_unlock(&rnp->lock); @@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) * If an RCU GP has gone long enough, go check for dyntick * idle CPUs and, if needed, send resched IPIs. 
*/ - if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) + if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) force_quiescent_state(rsp, 1); /* @@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), if (unlikely(++rdp->qlen > qhimark)) { rdp->blimit = LONG_MAX; force_quiescent_state(rsp, 0); - } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) + } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) force_quiescent_state(rsp, 1); local_irq_restore(flags); } @@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) /* Has an RCU GP gone long enough to send resched IPIs &c? */ if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && - ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)) + ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) return 1; /* nothing to do */ diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 4ee954f6a8d5..4b1875ba9404 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c @@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; - seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x", + seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' : ' ', rdp->completed, rdp->gpnum, rdp->passed_quiesc, rdp->passed_quiesc_completed, - rdp->qs_pending, - rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, - (int)(rdp->n_rcu_pending & 0xffff)); + rdp->qs_pending); #ifdef CONFIG_NO_HZ seq_printf(m, " dt=%d/%d dn=%d df=%lu", rdp->dynticks->dynticks, @@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; - seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld", + seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"", rdp->completed, rdp->gpnum, rdp->passed_quiesc, rdp->passed_quiesc_completed, - rdp->qs_pending, - rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, - rdp->n_rcu_pending); + rdp->qs_pending); #ifdef CONFIG_NO_HZ seq_printf(m, ",%d,%d,%d,%lu", rdp->dynticks->dynticks, @@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) static int show_rcudata_csv(struct seq_file *m, void *unused) { - seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\","); + seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\","); #ifdef CONFIG_NO_HZ seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); #endif /* #ifdef CONFIG_NO_HZ */ -- cgit v1.2.3-55-g7522 From 6ec3cfeca04622e3d80c9270191cd7f5f88214af Mon Sep 17 00:00:00 2001 From: Pallipadi, Venkatesh Date: Mon, 13 Apr 2009 15:20:58 -0700 Subject: x86, irq: Remove IRQ_DISABLED check in process context IRQ move As discussed in the thread here: http://marc.info/?l=linux-kernel&m=123964468521142&w=2 Eric W. Biederman observed: > It looks like some additional bugs have slipped in since last I looked. 
> > set_irq_affinity does this: > ifdef CONFIG_GENERIC_PENDING_IRQ > if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { > cpumask_copy(desc->affinity, cpumask); > desc->chip->set_affinity(irq, cpumask); > } else { > desc->status |= IRQ_MOVE_PENDING; > cpumask_copy(desc->pending_mask, cpumask); > } > #else > > That IRQ_DISABLED case is a software state and as such it has nothing to > do with how safe it is to move an irq in process context. [...] > > The only reason we migrate MSIs in interrupt context today is that there > wasn't infrastructure for support migration both in interrupt context > and outside of it. Yes. The idea here was to force the MSI migration to happen in process context. One of the patches in the series did disable_irq(dev->irq); irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); enable_irq(dev->irq); with the above patch adding irq/manage code check for interrupt disabled and moving the interrupt in process context. IIRC, there was no IRQ_MOVE_PCNTXT when we were developing this HPET code and we ended up having this ugly hack. IRQ_MOVE_PCNTXT was there when we eventually submitted the patch upstream. But, looks like I did a blind rebasing instead of using IRQ_MOVE_PCNTXT in hpet MSI code. Below patch fixes this. i.e., revert commit 932775a4ab622e3c99bd59f14cc and add PCNTXT to HPET MSI setup. Also removes copying of desc->affinity in generic code as set_affinity routines are doing it internally. Reported-by: "Eric W. Biederman" Signed-off-by: Venkatesh Pallipadi Acked-by: "Eric W. Biederman" Cc: "Li Shaohua" Cc: Gary Hade Cc: "lcm@us.ibm.com" Cc: suresh.b.siddha@intel.com LKML-Reference: <20090413222058.GB8211@linux-os.sc.intel.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/io_apic.c | 2 ++ kernel/irq/manage.c | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index a2789e42e162..30da617d18e4 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3670,12 +3670,14 @@ int arch_setup_hpet_msi(unsigned int irq) { int ret; struct msi_msg msg; + struct irq_desc *desc = irq_to_desc(irq); ret = msi_compose_msg(NULL, irq, &msg); if (ret < 0) return ret; hpet_msi_write(irq, &msg); + desc->status |= IRQ_MOVE_PCNTXT; set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, "edge"); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 7e2e7dd4cd2f..2734eca59243 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) spin_lock_irqsave(&desc->lock, flags); #ifdef CONFIG_GENERIC_PENDING_IRQ - if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { - cpumask_copy(desc->affinity, cpumask); + if (desc->status & IRQ_MOVE_PCNTXT) desc->chip->set_affinity(irq, cpumask); - } else { + else { desc->status |= IRQ_MOVE_PENDING; cpumask_copy(desc->pending_mask, cpumask); } -- cgit v1.2.3-55-g7522 From 297dbf50d7ab0539cf9cf7f2a66918665a18e45e Mon Sep 17 00:00:00 2001 From: Nikanth Karthikesan Date: Wed, 15 Apr 2009 10:37:04 +0530 Subject: swap: Remove code handling bio_alloc failure with __GFP_WAIT Remove code handling bio_alloc failure with __GFP_WAIT. 
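The rationale: bio_alloc() allocates from a mempool, so with __GFP_WAIT in the mask it may sleep but will not return NULL, making the error branch dead code. Roughly, the call site reduces to the following sketch (helper name and parameters are illustrative; the real change is in the diff below):

static struct bio *example_alloc_bio(pgoff_t page_off,
                                     struct block_device *bdev,
                                     bio_end_io_t *end_io)
{
        struct bio *bio;

        /* Mempool-backed and __GFP_WAIT set: may sleep, never NULL. */
        bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
        bio->bi_sector = page_off * (PAGE_SIZE >> 9);
        bio->bi_bdev = bdev;
        bio->bi_end_io = end_io;
        return bio;
}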
Signed-off-by: Nikanth Karthikesan Signed-off-by: Jens Axboe --- kernel/power/swap.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 505f319e489c..8ba052c86d48 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page, struct bio *bio; bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); - if (!bio) - return -ENOMEM; bio->bi_sector = page_off * (PAGE_SIZE >> 9); bio->bi_bdev = resume_bdev; bio->bi_end_io = end_swap_bio_read; -- cgit v1.2.3-55-g7522 From 5b1d07ed0e5b2707f786957c7a40eb2f399c84a8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 15 Apr 2009 19:35:01 +0100 Subject: RCU: Don't try and predeclare inline funcs as it upsets some versions of gcc Don't try and predeclare inline funcs like this: static inline void wait_migrated_callbacks(void) ... static void _rcu_barrier(enum rcu_barrier type) { ... wait_migrated_callbacks(); } ... static inline void wait_migrated_callbacks(void) { wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); } as it upsets some versions of gcc under some circumstances: kernel/rcupdate.c: In function `_rcu_barrier': kernel/rcupdate.c:125: sorry, unimplemented: inlining failed in call to 'wait_migrated_callbacks': function body not available kernel/rcupdate.c:152: sorry, unimplemented: called from here This can be dealt with by simply putting the static variables (rcu_migrate_*) at the top, and moving the implementation of the function up so that it replaces its forward declaration. Signed-off-by: David Howells Cc: Dipankar Sarma Cc: Paul E. McKenney Signed-off-by: Linus Torvalds --- kernel/rcupdate.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 2c7b8457d0d2..a967c9feb90a 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex); static struct completion rcu_barrier_completion; int rcu_scheduler_active __read_mostly; +static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); +static struct rcu_head rcu_migrate_head[3]; +static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); + /* * Awaken the corresponding synchronize_rcu() instance now that a * grace period has elapsed. 
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type) } } -static inline void wait_migrated_callbacks(void); +static inline void wait_migrated_callbacks(void) +{ + wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); +} /* * Orchestrate the specified type of RCU barrier, waiting for all @@ -179,21 +186,12 @@ void rcu_barrier_sched(void) } EXPORT_SYMBOL_GPL(rcu_barrier_sched); -static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); -static struct rcu_head rcu_migrate_head[3]; -static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); - static void rcu_migrate_callback(struct rcu_head *notused) { if (atomic_dec_and_test(&rcu_migrate_type_count)) wake_up(&rcu_migrate_wq); } -static inline void wait_migrated_callbacks(void) -{ - wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); -} - static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, unsigned long action, void *hcpu) { -- cgit v1.2.3-55-g7522 From 381512cf3d27f63f7a45b1bbe7d2d609c2ea3b74 Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Tue, 14 Apr 2009 09:09:36 +0530 Subject: sched: Avoid printing sched_group::__cpu_power for default case Commit 46e0bb9c12f4 ("sched: Print sched_group::__cpu_power in sched_domain_debug") produces messy dmesg output while attempting to print the sched_group::__cpu_power for each group in the sched_domain hierarchy. Fix this by not printing __cpu_power for the default case (i.e., __cpu_power == SCHED_LOAD_SCALE). [ Impact: reduce syslog clutter ] Reported-by: Tony Luck Signed-off-by: Gautham R Shenoy Fixed-by: Tony Luck Cc: a.p.zijlstra@chello.nl LKML-Reference: <20090414033936.GA534@in.ibm.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index e90e70ed36a3..b902e587a3a0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7367,8 +7367,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpumask_or(groupmask, groupmask, sched_group_cpus(group)); cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); - printk(KERN_CONT " %s (__cpu_power = %d)", str, - group->__cpu_power); + + printk(KERN_CONT " %s", str); + if (group->__cpu_power != SCHED_LOAD_SCALE) { + printk(KERN_CONT " (__cpu_power = %d)", + group->__cpu_power); + } group = group->next; } while (group != sd->groups); -- cgit v1.2.3-55-g7522 From 79d381c9f2354b594dcab9b04dfcc0debf7294fe Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Thu, 16 Apr 2009 19:30:18 -0400 Subject: kernel/softirq.c: fix sparse warning Fix sparse warning in kernel/softirq.c.
warning: do-while statement is not a compound statement Signed-off-by: H Hartley Sweeten LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/softirq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/softirq.c b/kernel/softirq.c index 2fecefacdc5b..b525dd348511 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -472,9 +472,9 @@ void tasklet_kill(struct tasklet_struct *t) printk("Attempt to kill tasklet from interrupt\n"); while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { - do + do { yield(); - while (test_bit(TASKLET_STATE_SCHED, &t->state)); + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); clear_bit(TASKLET_STATE_SCHED, &t->state); -- cgit v1.2.3-55-g7522 From c8a250058656495be02c00de61e26b017c86ef00 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 17 Apr 2009 09:40:49 +0200 Subject: lockdep: more robust lockdep_map init sequence Steven Rostedt reported: > OK, I think I figured this bug out. This is a lockdep issue with respect > to tracepoints. > > The trace points in lockdep are called all the time. Outside the lockdep > logic. But if lockdep were to trigger an error / warning (which this run > did) we might be in trouble. For new locks, like the dentry->d_lock, that > are created, they will not get a name: > > void lockdep_init_map(struct lockdep_map *lock, const char *name, > struct lock_class_key *key, int subclass) > { > if (unlikely(!debug_locks)) > return; > > When a problem is found by lockdep, debug_locks becomes false. Thus we > stop allocating names for locks. This dentry->d_lock I had, now has no > name. Worse yet, I have CONFIG_DEBUG_VM set, that scrambles non > initialized memory. Thus, when the trace point was hit, it had junk for > the lock->name, and the machine crashed. Ah, nice catch. I think we should put at least the name in regardless. Ensure we at least initialize the trivial entries of the depmap so that they can be relied upon, even when lockdep itself decided to pack up and go home. [ Impact: fix lock tracing after lockdep warnings. 
] Reported-by: Steven Rostedt Signed-off-by: Peter Zijlstra Acked-by: Steven Rostedt Cc: Andrew Morton Cc: Frederic Weisbecker LKML-Reference: <1239954049.23397.4156.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index b0f011866969..accb40cdb12a 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, void lockdep_init_map(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass) { - if (unlikely(!debug_locks)) + lock->class_cache = NULL; +#ifdef CONFIG_LOCK_STAT + lock->cpu = raw_smp_processor_id(); +#endif + + if (DEBUG_LOCKS_WARN_ON(!name)) { + lock->name = "NULL"; return; + } + + lock->name = name; if (DEBUG_LOCKS_WARN_ON(!key)) return; - if (DEBUG_LOCKS_WARN_ON(!name)) - return; /* * Sanity check, the lock-class key must be persistent: */ @@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, DEBUG_LOCKS_WARN_ON(1); return; } - lock->name = name; lock->key = key; - lock->class_cache = NULL; -#ifdef CONFIG_LOCK_STAT - lock->cpu = raw_smp_processor_id(); -#endif + + if (unlikely(!debug_locks)) + return; + if (subclass) register_lock_class(lock, subclass, 1); } -- cgit v1.2.3-55-g7522 From ff54250a0ebab7f90a5f848a0ba63f999830c872 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 18 Apr 2009 21:44:24 -0700 Subject: Remove 'recurse into child resources' logic from 'reserve_region_with_split()' This function is not actually used right now, since the original use case for it was done with insert_resource_expand_to_fit() instead. However, we now have another usage case that wants to basically do a "reserve IO resource, splitting around existing resources", however that one doesn't actually want the "recurse into the conflicting resource" logic at all. And since recursing into the conflicting resource was the most complex part, and isn't wanted, just remove it. Maybe we'll some day want both versions, but we can just resurrect the logic then. Tested-by: Yinghai Lu Signed-off-by: Linus Torvalds --- kernel/resource.c | 46 ++++++++++++---------------------------------- 1 file changed, 12 insertions(+), 34 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index fd5d7d574bb9..ac5f3a36923f 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root, res->end = end; res->flags = IORESOURCE_BUSY; - for (;;) { - conflict = __request_resource(parent, res); - if (!conflict) - break; - if (conflict != parent) { - parent = conflict; - if (!(conflict->flags & IORESOURCE_BUSY)) - continue; - } - - /* Uhhuh, that didn't work out.. 
*/ - kfree(res); - res = NULL; - break; - } - - if (!res) { - /* failed, split and try again */ - - /* conflict covered whole area */ - if (conflict->start <= start && conflict->end >= end) - return; + conflict = __request_resource(parent, res); + if (!conflict) + return; - if (conflict->start > start) - __reserve_region_with_split(root, start, conflict->start-1, name); - if (!(conflict->flags & IORESOURCE_BUSY)) { - resource_size_t common_start, common_end; + /* failed, split and try again */ + kfree(res); - common_start = max(conflict->start, start); - common_end = min(conflict->end, end); - if (common_start < common_end) - __reserve_region_with_split(root, common_start, common_end, name); - } - if (conflict->end < end) - __reserve_region_with_split(root, conflict->end+1, end, name); - } + /* conflict covered whole area */ + if (conflict->start <= start && conflict->end >= end) + return; + if (conflict->start > start) + __reserve_region_with_split(root, start, conflict->start-1, name); + if (conflict->end < end) + __reserve_region_with_split(root, conflict->end+1, end, name); } void __init reserve_region_with_split(struct resource *root, -- cgit v1.2.3-55-g7522 From 6a7c7eaf71b636f197d73b381a2ab729ebdcfb2e Mon Sep 17 00:00:00 2001 From: Rafael J. Wysocki Date: Sun, 19 Apr 2009 20:08:42 +0200 Subject: PM/Suspend: Introduce two new platform callbacks to avoid breakage Commit 900af0d973856d6feb6fc088c2d0d3fde57707d3 (PM: Change suspend code ordering) changed the ordering of suspend code in such a way that the platform .prepare() callback is now executed after the device drivers' late suspend callbacks have run. Unfortunately, this turns out to break ARM platforms that need to talk via I2C to power control devices during the .prepare() callback. For this reason introduce two new platform suspend callbacks, .prepare_late() and .wake(), that will be called just prior to disabling non-boot CPUs and right after bringing them back on line, respectively, and use them instead of .prepare() and .finish() for ACPI suspend. Make the PM core execute the .prepare() and .finish() platform suspend callbacks where they were executed previously (that is, right after calling the regular suspend methods provided by device drivers and right before executing their regular resume methods, respectively). It is not necessary to make analogous changes to the hibernation code and data structures at the moment, because they are only used by ACPI platforms. Signed-off-by: Rafael J. 
Wysocki Reported-by: Russell King Acked-by: Len Brown --- drivers/acpi/sleep.c | 8 ++++---- include/linux/suspend.h | 36 ++++++++++++++++++++++++++---------- kernel/power/main.c | 24 +++++++++++++++++------- 3 files changed, 47 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 779e4e500df4..d060e6fd7fd5 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -300,9 +300,9 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state) static struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, - .prepare = acpi_pm_prepare, + .prepare_late = acpi_pm_prepare, .enter = acpi_suspend_enter, - .finish = acpi_pm_finish, + .wake = acpi_pm_finish, .end = acpi_pm_end, }; @@ -328,9 +328,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state) static struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, - .prepare = acpi_pm_disable_gpes, + .prepare_late = acpi_pm_disable_gpes, .enter = acpi_suspend_enter, - .finish = acpi_pm_finish, + .wake = acpi_pm_finish, .end = acpi_pm_end, .recover = acpi_pm_finish, }; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3e3a4364cbff..795032edfc46 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -58,10 +58,17 @@ typedef int __bitwise suspend_state_t; * by @begin(). * @prepare() is called right after devices have been suspended (ie. the * appropriate .suspend() method has been executed for each device) and - * before the nonboot CPUs are disabled (it is executed with IRQs enabled). - * This callback is optional. It returns 0 on success or a negative - * error code otherwise, in which case the system cannot enter the desired - * sleep state (@enter() and @finish() will not be called in that case). + * before device drivers' late suspend callbacks are executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@prepare_late(), @enter(), + * @wake(), and @finish() will not be called in that case). + * + * @prepare_late: Finish preparing the platform for entering the system sleep + * state indicated by @begin(). + * @prepare_late is called before disabling nonboot CPUs and after + * device drivers' late suspend callbacks have been executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@enter() and @wake()). * * @enter: Enter the system sleep state indicated by @begin() or represented by * the argument if @begin() is not implemented. @@ -69,19 +76,26 @@ typedef int __bitwise suspend_state_t; * error code otherwise, in which case the system cannot enter the desired * sleep state. * - * @finish: Called when the system has just left a sleep state, right after - * the nonboot CPUs have been enabled and before devices are resumed (it is - * executed with IRQs enabled). + * @wake: Called when the system has just left a sleep state, right after + * the nonboot CPUs have been enabled and before device drivers' early + * resume callbacks are executed. + * This callback is optional, but should be implemented by the platforms + * that implement @prepare_late(). If implemented, it is always called + * after @enter(), even if @enter() fails. + * + * @finish: Finish wake-up of the platform. 
+ * @finish is called right prior to calling device drivers' regular suspend + * callbacks. * This callback is optional, but should be implemented by the platforms * that implement @prepare(). If implemented, it is always called after - * @enter() (even if @enter() fails). + * @enter() and @wake(), if implemented, even if any of them fails. * * @end: Called by the PM core right after resuming devices, to indicate to * the platform that the system has returned to the working state or * the transition to the sleep state has been aborted. * This callback is optional, but should be implemented by the platforms - * that implement @begin(), but platforms implementing @begin() should - * also provide a @end() which cleans up transitions aborted before + * that implement @begin(). Accordingly, platforms implementing @begin() + * should also provide a @end() which cleans up transitions aborted before * @enter(). * * @recover: Recover the platform from a suspend failure. @@ -93,7 +107,9 @@ struct platform_suspend_ops { int (*valid)(suspend_state_t state); int (*begin)(suspend_state_t state); int (*prepare)(void); + int (*prepare_late)(void); int (*enter)(suspend_state_t state); + void (*wake)(void); void (*finish)(void); void (*end)(void); void (*recover)(void); diff --git a/kernel/power/main.c b/kernel/power/main.c index f172f41858bb..f99ed6a75eac 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state) device_pm_lock(); + if (suspend_ops->prepare) { + error = suspend_ops->prepare(); + if (error) + goto Done; + } + error = device_power_down(PMSG_SUSPEND); if (error) { printk(KERN_ERR "PM: Some devices failed to power down\n"); - goto Done; + goto Platfrom_finish; } - if (suspend_ops->prepare) { - error = suspend_ops->prepare(); + if (suspend_ops->prepare_late) { + error = suspend_ops->prepare_late(); if (error) goto Power_up_devices; } if (suspend_test(TEST_PLATFORM)) - goto Platfrom_finish; + goto Platform_wake; error = disable_nonboot_cpus(); if (error || suspend_test(TEST_CPUS)) @@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state) Enable_cpus: enable_nonboot_cpus(); - Platfrom_finish: - if (suspend_ops->finish) - suspend_ops->finish(); + Platform_wake: + if (suspend_ops->wake) + suspend_ops->wake(); Power_up_devices: device_power_up(PMSG_RESUME); + Platfrom_finish: + if (suspend_ops->finish) + suspend_ops->finish(); + Done: device_pm_unlock(); -- cgit v1.2.3-55-g7522 From 24b6f16ecf37f918a1934d590e9e71c100d6388f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Apr 2009 03:25:41 -0400 Subject: No need for crossing to mountpoint in audit_tag_tree() is_under() will DTRT anyway. And yes, is_subdir() behaviour is intentional. Signed-off-by: Al Viro --- fs/dcache.c | 1 - kernel/audit_tree.c | 3 --- 2 files changed, 4 deletions(-) (limited to 'kernel') diff --git a/fs/dcache.c b/fs/dcache.c index 761d30be2683..1fcffebfb44f 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2149,7 +2149,6 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) int result; unsigned long seq; - /* FIXME: This is old behavior, needed? Please check callers. 
*/ if (new_dentry == old_dentry) return 1; diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 917ab9525568..6e7351739a82 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new) dentry = dget(path.dentry); path_put(&path); - if (dentry == tagged->mnt_root && dentry == mnt->mnt_root) - follow_up(&mnt, &dentry); - list_add_tail(&list, &tagged->mnt_list); mutex_lock(&audit_filter_mutex); -- cgit v1.2.3-55-g7522 From 8e19608e8b5c001e4a66ce482edc474f05fb7355 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 21 Apr 2009 12:24:00 -0700 Subject: clocksource: pass clocksource to read() callback Pass clocksource pointer to the read() callback for clocksources. This allows us to share the callback between multiple instances. [hugh@veritas.com: fix powerpc build of clocksource pass clocksource mods] [akpm@linux-foundation.org: cleanup] Signed-off-by: Magnus Damm Acked-by: John Stultz Cc: Thomas Gleixner Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mach-at91/at91rm9200_time.c | 2 +- arch/arm/mach-at91/at91sam926x_time.c | 2 +- arch/arm/mach-davinci/time.c | 2 +- arch/arm/mach-imx/time.c | 2 +- arch/arm/mach-ixp4xx/common.c | 2 +- arch/arm/mach-msm/timer.c | 4 ++-- arch/arm/mach-netx/time.c | 2 +- arch/arm/mach-ns9xxx/time-ns9360.c | 2 +- arch/arm/mach-omap1/time.c | 2 +- arch/arm/mach-omap2/timer-gp.c | 2 +- arch/arm/mach-pxa/time.c | 2 +- arch/arm/mach-realview/core.c | 2 +- arch/arm/mach-versatile/core.c | 2 +- arch/arm/plat-mxc/time.c | 2 +- arch/arm/plat-omap/common.c | 4 ++-- arch/arm/plat-orion/time.c | 2 +- arch/avr32/kernel/time.c | 2 +- arch/blackfin/kernel/time-ts.c | 12 ++++++------ arch/ia64/kernel/cyclone.c | 2 +- arch/ia64/kernel/time.c | 4 ++-- arch/ia64/sn/kernel/sn2/timer.c | 2 +- arch/m68knommu/platform/68328/timers.c | 2 +- arch/m68knommu/platform/coldfire/dma_timer.c | 2 +- arch/m68knommu/platform/coldfire/pit.c | 2 +- arch/m68knommu/platform/coldfire/timers.c | 2 +- arch/mips/kernel/cevt-txx9.c | 2 +- arch/mips/kernel/csrc-bcm1480.c | 2 +- arch/mips/kernel/csrc-ioasic.c | 6 +++--- arch/mips/kernel/csrc-r4k.c | 2 +- arch/mips/kernel/csrc-sb1250.c | 2 +- arch/mips/kernel/i8253.c | 2 +- arch/mips/nxp/pnx8550/common/time.c | 2 +- arch/mips/sgi-ip27/ip27-timer.c | 2 +- arch/powerpc/kernel/time.c | 8 ++++---- arch/s390/kernel/time.c | 2 +- arch/sh/kernel/time_32.c | 2 +- arch/sh/kernel/timers/timer-tmu.c | 2 +- arch/sparc/kernel/time_64.c | 7 ++++++- arch/um/kernel/time.c | 2 +- arch/x86/kernel/hpet.c | 6 +++--- arch/x86/kernel/i8253.c | 2 +- arch/x86/kernel/kvmclock.c | 7 ++++++- arch/x86/kernel/tsc.c | 2 +- arch/x86/kernel/vmiclock_32.c | 2 +- arch/x86/lguest/boot.c | 2 +- arch/x86/xen/time.c | 7 ++++++- drivers/char/hpet.c | 2 +- drivers/clocksource/acpi_pm.c | 12 ++++++------ drivers/clocksource/cyclone.c | 2 +- drivers/clocksource/scx200_hrt.c | 2 +- drivers/clocksource/tcb_clksrc.c | 2 +- include/linux/clocksource.h | 6 +++--- kernel/time/clocksource.c | 8 ++++---- kernel/time/jiffies.c | 2 +- 54 files changed, 94 insertions(+), 79 deletions(-) (limited to 'kernel') diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 1ff1bda0a894..309f3511aa20 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -85,7 +85,7 @@ static struct irqaction at91rm9200_timer_irq = { .handler = at91rm9200_timer_interrupt }; -static cycle_t read_clk32k(void) +static cycle_t read_clk32k(struct 
clocksource *cs) { return read_CRTR(); } diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c index b63e1d5f1bad..4bd56aee4370 100644 --- a/arch/arm/mach-at91/at91sam926x_time.c +++ b/arch/arm/mach-at91/at91sam926x_time.c @@ -31,7 +31,7 @@ static u32 pit_cnt; /* access only w/system irq blocked */ * Clocksource: just a monotonic counter of MCK/16 cycles. * We don't care whether or not PIT irqs are enabled. */ -static cycle_t read_pit_clk(void) +static cycle_t read_pit_clk(struct clocksource *cs) { unsigned long flags; u32 elapsed; diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index f8bcd29d17a6..6c227d4ba998 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -238,7 +238,7 @@ static void __init timer_init(void) /* * clocksource */ -static cycle_t read_cycles(void) +static cycle_t read_cycles(struct clocksource *cs) { struct timer_s *t = &timers[TID_CLOCKSOURCE]; diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c index aff0ebcfa847..5aef18b599e5 100644 --- a/arch/arm/mach-imx/time.c +++ b/arch/arm/mach-imx/time.c @@ -73,7 +73,7 @@ static void __init imx_timer_hardware_init(void) IMX_TCTL(TIMER_BASE) = TCTL_FRR | TCTL_CLK_PCLK1 | TCTL_TEN; } -cycle_t imx_get_cycles(void) +cycle_t imx_get_cycles(struct clocksource *cs) { return IMX_TCN(TIMER_BASE); } diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index f4656d2ac8a8..1e93dfee7543 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -401,7 +401,7 @@ void __init ixp4xx_sys_init(void) /* * clocksource */ -cycle_t ixp4xx_get_cycles(void) +cycle_t ixp4xx_get_cycles(struct clocksource *cs) { return *IXP4XX_OSTS; } diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 444d9c0f5ca6..4855b8ca5101 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -57,12 +57,12 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static cycle_t msm_gpt_read(void) +static cycle_t msm_gpt_read(struct clocksource *cs) { return readl(MSM_GPT_BASE + TIMER_COUNT_VAL); } -static cycle_t msm_dgt_read(void) +static cycle_t msm_dgt_read(struct clocksource *cs) { return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT; } diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c index f201fddb594f..82801dbf0579 100644 --- a/arch/arm/mach-netx/time.c +++ b/arch/arm/mach-netx/time.c @@ -104,7 +104,7 @@ static struct irqaction netx_timer_irq = { .handler = netx_timer_interrupt, }; -cycle_t netx_get_cycles(void) +cycle_t netx_get_cycles(struct clocksource *cs) { return readl(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE)); } diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c index 41df69721769..77281260358a 100644 --- a/arch/arm/mach-ns9xxx/time-ns9360.c +++ b/arch/arm/mach-ns9xxx/time-ns9360.c @@ -25,7 +25,7 @@ #define TIMER_CLOCKEVENT 1 static u32 latch; -static cycle_t ns9360_clocksource_read(void) +static cycle_t ns9360_clocksource_read(struct clocksource *cs) { return __raw_readl(SYS_TR(TIMER_CLOCKSOURCE)); } diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 495a32c287b4..4d56408d3cff 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -198,7 +198,7 @@ static struct irqaction omap_mpu_timer2_irq = { .handler = omap_mpu_timer2_interrupt, }; -static cycle_t mpu_read(void) +static cycle_t mpu_read(struct clocksource *cs) { return 
~omap_mpu_timer_read(1); } diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 9fc13a2cc3f4..1cb2c0909c2b 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -138,7 +138,7 @@ static inline void __init omap2_gp_clocksource_init(void) {} * clocksource */ static struct omap_dm_timer *gpt_clocksource; -static cycle_t clocksource_read_cycles(void) +static cycle_t clocksource_read_cycles(struct clocksource *cs) { return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource); } diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 8eb3830fbb0b..750c448db672 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -125,7 +125,7 @@ static struct clock_event_device ckevt_pxa_osmr0 = { .set_mode = pxa_osmr0_set_mode, }; -static cycle_t pxa_read_oscr(void) +static cycle_t pxa_read_oscr(struct clocksource *cs) { return OSCR; } diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 9ab947c14f26..942e1a7eb9b2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -715,7 +715,7 @@ static struct irqaction realview_timer_irq = { .handler = realview_timer_interrupt, }; -static cycle_t realview_get_cycles(void) +static cycle_t realview_get_cycles(struct clocksource *cs) { return ~readl(timer3_va_base + TIMER_VALUE); } diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 565776680d8c..1f929c391af7 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -948,7 +948,7 @@ static struct irqaction versatile_timer_irq = { .handler = versatile_timer_interrupt, }; -static cycle_t versatile_get_cycles(void) +static cycle_t versatile_get_cycles(struct clocksource *cs) { return ~readl(TIMER3_VA_BASE + TIMER_VALUE); } diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c index ef1b3cd85bd3..dab3357196fb 100644 --- a/arch/arm/plat-mxc/time.c +++ b/arch/arm/plat-mxc/time.c @@ -36,7 +36,7 @@ static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED; /* clock source */ -static cycle_t mxc_get_cycles(void) +static cycle_t mxc_get_cycles(struct clocksource *cs) { return __raw_readl(TIMER_BASE + MXC_TCN); } diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index d1797147732f..433021f3d7cc 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c @@ -185,7 +185,7 @@ console_initcall(omap_add_serial_console); #include -static cycle_t omap_32k_read(void) +static cycle_t omap_32k_read(struct clocksource *cs) { return omap_readl(TIMER_32K_SYNCHRONIZED); } @@ -207,7 +207,7 @@ unsigned long long sched_clock(void) { unsigned long long ret; - ret = (unsigned long long)omap_32k_read(); + ret = (unsigned long long)omap_32k_read(&clocksource_32k); ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift; return ret; } diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 6fa2923e6dca..2faf9dba4ef7 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c @@ -41,7 +41,7 @@ static u32 ticks_per_jiffy; /* * Clocksource handling. 
*/ -static cycle_t orion_clksrc_read(void) +static cycle_t orion_clksrc_read(struct clocksource *cs) { return 0xffffffff - readl(TIMER0_VAL); } diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 0ff46bf873b0..f27aa3b259fa 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -18,7 +18,7 @@ #include -static cycle_t read_cycle_count(void) +static cycle_t read_cycle_count(struct clocksource *cs) { return (cycle_t)sysreg_read(COUNT); } diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 0ed2badfd746..27646121280a 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -58,16 +58,11 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc) return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; } -static cycle_t read_cycles(void) +static cycle_t read_cycles(struct clocksource *cs) { return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); } -unsigned long long sched_clock(void) -{ - return cycles_2_ns(read_cycles()); -} - static struct clocksource clocksource_bfin = { .name = "bfin_cycles", .rating = 350, @@ -77,6 +72,11 @@ static struct clocksource clocksource_bfin = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +unsigned long long sched_clock(void) +{ + return cycles_2_ns(read_cycles(&clocksource_bfin)); +} + static int __init bfin_clocksource_init(void) { set_cyc2ns_scale(get_cclk() / 1000); diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c index 790ef0d87e12..71e35864d2e2 100644 --- a/arch/ia64/kernel/cyclone.c +++ b/arch/ia64/kernel/cyclone.c @@ -21,7 +21,7 @@ void __init cyclone_setup(void) static void __iomem *cyclone_mc; -static cycle_t read_cyclone(void) +static cycle_t read_cyclone(struct clocksource *cs) { return (cycle_t)readq((void __iomem *)cyclone_mc); } diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 641c8b61c4f1..604c1a35db33 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -33,7 +33,7 @@ #include "fsyscall_gtod_data.h" -static cycle_t itc_get_cycles(void); +static cycle_t itc_get_cycles(struct clocksource *cs); struct fsyscall_gtod_data_t fsyscall_gtod_data = { .lock = SEQLOCK_UNLOCKED, @@ -383,7 +383,7 @@ ia64_init_itm (void) } } -static cycle_t itc_get_cycles(void) +static cycle_t itc_get_cycles(struct clocksource *cs) { u64 lcycle, now, ret; diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c index cf67fc562054..21d6f09e3447 100644 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ b/arch/ia64/sn/kernel/sn2/timer.c @@ -23,7 +23,7 @@ extern unsigned long sn_rtc_cycles_per_second; -static cycle_t read_sn2(void) +static cycle_t read_sn2(struct clocksource *cs) { return (cycle_t)readq(RTC_COUNTER_ADDR); } diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c index 6bafefa546e5..309f725995bf 100644 --- a/arch/m68knommu/platform/68328/timers.c +++ b/arch/m68knommu/platform/68328/timers.c @@ -75,7 +75,7 @@ static struct irqaction m68328_timer_irq = { /***************************************************************************/ -static cycle_t m68328_read_clk(void) +static cycle_t m68328_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/m68knommu/platform/coldfire/dma_timer.c b/arch/m68knommu/platform/coldfire/dma_timer.c index 772578b1084f..a5f562823d7a 100644 --- a/arch/m68knommu/platform/coldfire/dma_timer.c +++ b/arch/m68knommu/platform/coldfire/dma_timer.c @@ -34,7 +34,7 @@ #define DMA_DTMR_CLK_DIV_16 (2 << 1) #define 
DMA_DTMR_ENABLE (1 << 0) -static cycle_t cf_dt_get_cycles(void) +static cycle_t cf_dt_get_cycles(struct clocksource *cs) { return __raw_readl(DTCN0); } diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c index 2a12e7fa9748..61b96211f8ff 100644 --- a/arch/m68knommu/platform/coldfire/pit.c +++ b/arch/m68knommu/platform/coldfire/pit.c @@ -125,7 +125,7 @@ static struct irqaction pit_irq = { /***************************************************************************/ -static cycle_t pit_read_clk(void) +static cycle_t pit_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c index 454f25493491..1ba8a3731653 100644 --- a/arch/m68knommu/platform/coldfire/timers.c +++ b/arch/m68knommu/platform/coldfire/timers.c @@ -78,7 +78,7 @@ static struct irqaction mcftmr_timer_irq = { /***************************************************************************/ -static cycle_t mcftmr_read_clk(void) +static cycle_t mcftmr_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index eccf7d6096bd..2e911e3da8d3 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -22,7 +22,7 @@ static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr; -static cycle_t txx9_cs_read(void) +static cycle_t txx9_cs_read(struct clocksource *cs) { return __raw_readl(&txx9_cs_tmrptr->trr); } diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c index 868745e7184b..51489f8a825e 100644 --- a/arch/mips/kernel/csrc-bcm1480.c +++ b/arch/mips/kernel/csrc-bcm1480.c @@ -28,7 +28,7 @@ #include -static cycle_t bcm1480_hpt_read(void) +static cycle_t bcm1480_hpt_read(struct clocksource *cs) { return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); } diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c index 1d5f63cf8997..b551f48d3a07 100644 --- a/arch/mips/kernel/csrc-ioasic.c +++ b/arch/mips/kernel/csrc-ioasic.c @@ -25,7 +25,7 @@ #include #include -static cycle_t dec_ioasic_hpt_read(void) +static cycle_t dec_ioasic_hpt_read(struct clocksource *cs) { return ioasic_read(IO_REG_FCTR); } @@ -47,13 +47,13 @@ void __init dec_ioasic_clocksource_init(void) while (!ds1287_timer_state()) ; - start = dec_ioasic_hpt_read(); + start = dec_ioasic_hpt_read(&clocksource_dec); while (i--) while (!ds1287_timer_state()) ; - end = dec_ioasic_hpt_read(); + end = dec_ioasic_hpt_read(&clocksource_dec); freq = (end - start) * 10; printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index f1a2893931ed..e95a3cd48eea 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -10,7 +10,7 @@ #include -static cycle_t c0_hpt_read(void) +static cycle_t c0_hpt_read(struct clocksource *cs) { return read_c0_count(); } diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c index 92212bbb8e45..d14d3d1907fa 100644 --- a/arch/mips/kernel/csrc-sb1250.c +++ b/arch/mips/kernel/csrc-sb1250.c @@ -33,7 +33,7 @@ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over * again. 
*/ -static cycle_t sb1250_hpt_read(void) +static cycle_t sb1250_hpt_read(struct clocksource *cs) { unsigned int count; diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 689719e34f08..ed20e7fe65e3 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -128,7 +128,7 @@ void __init setup_pit_timer(void) * to just read by itself. So use jiffies to emulate a free * running counter: */ -static cycle_t pit_read(void) +static cycle_t pit_read(struct clocksource *cs) { unsigned long flags; int count; diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c index cf293b279098..8df43e9e4d90 100644 --- a/arch/mips/nxp/pnx8550/common/time.c +++ b/arch/mips/nxp/pnx8550/common/time.c @@ -35,7 +35,7 @@ static unsigned long cpj; -static cycle_t hpt_read(void) +static cycle_t hpt_read(struct clocksource *cs) { return read_c0_count2(); } diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index f024057a35f8..f10a7cd64f7e 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -159,7 +159,7 @@ static void __init hub_rt_clock_event_global_init(void) setup_irq(irq, &hub_rt_irqaction); } -static cycle_t hub_rt_read(void) +static cycle_t hub_rt_read(struct clocksource *cs) { return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT); } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 926ea864e34f..48571ac56fb7 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -77,7 +77,7 @@ #include #include -static cycle_t rtc_read(void); +static cycle_t rtc_read(struct clocksource *); static struct clocksource clocksource_rtc = { .name = "rtc", .rating = 400, @@ -88,7 +88,7 @@ static struct clocksource clocksource_rtc = { .read = rtc_read, }; -static cycle_t timebase_read(void); +static cycle_t timebase_read(struct clocksource *); static struct clocksource clocksource_timebase = { .name = "timebase", .rating = 400, @@ -766,12 +766,12 @@ unsigned long read_persistent_clock(void) } /* clocksource code */ -static cycle_t rtc_read(void) +static cycle_t rtc_read(struct clocksource *cs) { return (cycle_t)get_rtc(); } -static cycle_t timebase_read(void) +static cycle_t timebase_read(struct clocksource *cs) { return (cycle_t)get_tb(); } diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 6ded50dfa75a..ef596d020573 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -201,7 +201,7 @@ unsigned long read_persistent_clock(void) return ts.tv_sec; } -static cycle_t read_tod_clock(void) +static cycle_t read_tod_clock(struct clocksource *cs) { return get_clock(); } diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index c34e1e0f9b02..1700d2465f6c 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -208,7 +208,7 @@ unsigned long long sched_clock(void) if (!clocksource_sh.rating) return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); - cycles = clocksource_sh.read(); + cycles = clocksource_sh.read(&clocksource_sh); return cyc2ns(&clocksource_sh, cycles); } #endif diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index c5d3396f5960..fe8d8930ccb6 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -81,7 +81,7 @@ static int tmu_timer_stop(void) */ static int tmus_are_scaled; -static cycle_t tmu_timer_read(void) +static cycle_t tmu_timer_read(struct clocksource *cs) { return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled; } diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c static unsigned long tb_ticks_per_usec __read_mostly; +static cycle_t clocksource_tick_read(struct clocksource *cs) +{ + return tick_ops->get_tick(); +} + void __init 
time_init(void) { unsigned long freq = sparc64_init_timers(); @@ -827,7 +832,7 @@ void __init time_init(void) clocksource_tick.mult = clocksource_hz2mult(freq, clocksource_tick.shift); - clocksource_tick.read = tick_ops->get_tick; + clocksource_tick.read = clocksource_tick_read; printk("clocksource: mult[%x] shift[%d]\n", clocksource_tick.mult, clocksource_tick.shift); diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index b13a87a3ec95..c8b9c469fcd7 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -65,7 +65,7 @@ static irqreturn_t um_timer(int irq, void *dev) return IRQ_HANDLED; } -static cycle_t itimer_read(void) +static cycle_t itimer_read(struct clocksource *cs) { return os_nsecs() / 1000; } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 648b3a2a3a44..3f0019e0a229 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, /* * Clock source related code */ -static cycle_t read_hpet(void) +static cycle_t read_hpet(struct clocksource *cs) { return (cycle_t)hpet_readl(HPET_COUNTER); } @@ -756,7 +756,7 @@ static int hpet_clocksource_register(void) hpet_restart_counter(); /* Verify whether hpet counter works */ - t1 = read_hpet(); + t1 = hpet_readl(HPET_COUNTER); rdtscll(start); /* @@ -770,7 +770,7 @@ static int hpet_clocksource_register(void) rdtscll(now); } while ((now - start) < 200000UL); - if (t1 == read_hpet()) { + if (t1 == hpet_readl(HPET_COUNTER)) { printk(KERN_WARNING "HPET counter not counting. HPET disabled\n"); return -ENODEV; diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 3475440baa54..c2e0bb0890d4 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -129,7 +129,7 @@ void __init setup_pit_timer(void) * to just read by itself. So use jiffies to emulate a free * running counter: */ -static cycle_t pit_read(void) +static cycle_t pit_read(struct clocksource *cs) { static int old_count; static u32 old_jifs; diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 137f2e8132df..223af43f1526 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void) return ret; } +static cycle_t kvm_clock_get_cycles(struct clocksource *cs) +{ + return kvm_clock_read(); +} + /* * If we don't do that, there is the possibility that the guest * will calibrate under heavy load - thus, getting a lower lpj - @@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void) static struct clocksource kvm_clock = { .name = "kvm-clock", - .read = kvm_clock_read, + .read = kvm_clock_get_cycles, .rating = 400, .mask = CLOCKSOURCE_MASK(64), .mult = 1 << KVM_SCALE, diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7a567ebe6361..d57de05dc430 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc; * code, which is necessary to support wrapping clocksources like pm * timer. 
*/ -static cycle_t read_tsc(void) +static cycle_t read_tsc(struct clocksource *cs) { cycle_t ret = (cycle_t)get_cycles(); diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index d303369a7bad..2b3eb82efeeb 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void) /** vmi clocksource */ static struct clocksource clocksource_vmi; -static cycle_t read_real_cycles(void) +static cycle_t read_real_cycles(struct clocksource *cs) { cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); return max(ret, clocksource_vmi.cycle_last); } diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a2085368a3dc..ca7ec44bafc3 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -663,7 +663,7 @@ static unsigned long lguest_tsc_khz(void) /* If we can't use the TSC, the kernel falls back to our lower-priority * "lguest_clock", where we read the time value given to us by the Host. */ -static cycle_t lguest_clock_read(void) +static cycle_t lguest_clock_read(struct clocksource *cs) { unsigned long sec, nsec; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 14f240623497..0a5aa44299a5 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void) return ret; } +static cycle_t xen_clocksource_get_cycles(struct clocksource *cs) +{ + return xen_clocksource_read(); +} + static void xen_read_wallclock(struct timespec *ts) { struct shared_info *s = HYPERVISOR_shared_info; @@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now) static struct clocksource xen_clocksource __read_mostly = { .name = "xen", .rating = 400, - .read = xen_clocksource_read, + .read = xen_clocksource_get_cycles, .mask = ~0, .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */ .shift = XEN_SHIFT, diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c -static cycle_t read_hpet(void) +static cycle_t read_hpet(struct clocksource *cs) { return (cycle_t)read_counter((void __iomem *)hpet_mctr); } diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c -static cycle_t acpi_pm_read_slow(void) +static cycle_t acpi_pm_read_slow(struct clocksource *cs) { return (cycle_t)acpi_pm_read_verified(); } -static cycle_t acpi_pm_read(void) +static cycle_t acpi_pm_read(struct clocksource *cs) { return (cycle_t)read_pmtmr(); } mach_prepare_counter(); - value1 = clocksource_acpi_pm.read(); + value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm); mach_countup(&count); - value2 = clocksource_acpi_pm.read(); + value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm); - value1 = clocksource_acpi_pm.read(); + value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm); - value2 = clocksource_acpi_pm.read(); + value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm); if (value2 == value1) continue; if (value2 > value1) diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c index 8615059a8729..64e528e8bfa6 100644 --- a/drivers/clocksource/cyclone.c +++ b/drivers/clocksource/cyclone.c @@ -19,7 +19,7 @@ int use_cyclone = 0; static void __iomem *cyclone_ptr; -static cycle_t read_cyclone(void) +static cycle_t read_cyclone(struct clocksource *cs) { return (cycle_t)readl(cyclone_ptr); } diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c index b92da677aa5d..27f4d9637b62 100644 --- a/drivers/clocksource/scx200_hrt.c +++ b/drivers/clocksource/scx200_hrt.c @@ -43,7 +43,7 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)"); /* The base timer frequency, * 27 if selected */ #define HRT_FREQ 1000000 -static cycle_t read_hrt(void) +static cycle_t read_hrt(struct clocksource *cs) { /* Read the timer value */ return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET); } diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 254f1064d973..01b886e68822 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -39,7 +39,7 @@ static void __iomem *tcaddr; -static cycle_t tc_get_cycles(void) +static cycle_t tc_get_cycles(struct clocksource *cs) { unsigned long flags; u32 lower, upper; diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 573819ef4cc0..0d96cde9ee5d 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -143,7 +143,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * 400-499: Perfect * The ideal clocksource. A must-use where * available. 
- * @read: returns a cycle value + * @read: returns a cycle value, passes clocksource as argument * @mask: bitmask for two's complement * subtraction of non 64 bit counters * @mult: cycle to nanosecond multiplier (adjusted by NTP) @@ -162,7 +162,7 @@ struct clocksource { char *name; struct list_head list; int rating; - cycle_t (*read)(void); + cycle_t (*read)(struct clocksource *cs); cycle_t mask; u32 mult; u32 mult_orig; @@ -271,7 +271,7 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) */ static inline cycle_t clocksource_read(struct clocksource *cs) { - return cs->read(); + return cs->read(cs); } /** diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index c46c931a7fe7..ecfd7b5187e0 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data) resumed = test_and_clear_bit(0, &watchdog_resumed); - wdnow = watchdog->read(); + wdnow = watchdog->read(watchdog); wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); watchdog_last = wdnow; list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { - csnow = cs->read(); + csnow = cs->read(cs); if (unlikely(resumed)) { cs->wd_last = csnow; @@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) list_add(&cs->wd_list, &watchdog_list); if (!started && watchdog) { - watchdog_last = watchdog->read(); + watchdog_last = watchdog->read(watchdog); watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); @@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) cse->flags &= ~CLOCK_SOURCE_WATCHDOG; /* Start if list is not empty */ if (!list_empty(&watchdog_list)) { - watchdog_last = watchdog->read(); + watchdog_last = watchdog->read(watchdog); watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 06f197560f3b..c3f6c30816e3 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -50,7 +50,7 @@ */ #define JIFFIES_SHIFT 8 -static cycle_t jiffies_read(void) +static cycle_t jiffies_read(struct clocksource *cs) { return (cycle_t) jiffies; } -- cgit v1.2.3-55-g7522 From 4614e6adafa2c5e6c3a9c245af2807fa7bc5117a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 21 Apr 2009 12:24:02 -0700 Subject: clocksource: add enable() and disable() callbacks Add enable() and disable() callbacks for clocksources. This allows us to put unused clocksources in power save mode. The functions clocksource_enable() and clocksource_disable() wrap the callbacks and are inserted in the timekeeping code to enable before use and disable after switching to a new clocksource. Signed-off-by: Magnus Damm Acked-by: John Stultz Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/clocksource.h | 31 +++++++++++++++++++++++++++++++ kernel/time/timekeeping.c | 12 +++++++++--- 2 files changed, 40 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 0d96cde9ee5d..5a40d14daa9f 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -144,6 +144,8 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * The ideal clocksource. A must-use where * available. 
* @read: returns a cycle value, passes clocksource as argument + * @enable: optional function to enable the clocksource + * @disable: optional function to disable the clocksource * @mask: bitmask for two's complement * subtraction of non 64 bit counters * @mult: cycle to nanosecond multiplier (adjusted by NTP) @@ -163,6 +165,8 @@ struct clocksource { struct list_head list; int rating; cycle_t (*read)(struct clocksource *cs); + int (*enable)(struct clocksource *cs); + void (*disable)(struct clocksource *cs); cycle_t mask; u32 mult; u32 mult_orig; @@ -274,6 +278,33 @@ static inline cycle_t clocksource_read(struct clocksource *cs) return cs->read(cs); } +/** + * clocksource_enable: - enable clocksource + * @cs: pointer to clocksource + * + * Enables the specified clocksource. The clocksource callback + * function should start up the hardware and setup mult and field + * members of struct clocksource to reflect hardware capabilities. + */ +static inline int clocksource_enable(struct clocksource *cs) +{ + return cs->enable ? cs->enable(cs) : 0; +} + +/** + * clocksource_disable: - disable clocksource + * @cs: pointer to clocksource + * + * Disables the specified clocksource. The clocksource callback + * function should power down the now unused hardware block to + * save power. + */ +static inline void clocksource_disable(struct clocksource *cs) +{ + if (cs->disable) + cs->disable(cs); +} + /** * cyc2ns - converts clocksource cycles to nanoseconds * @cs: Pointer to clocksource diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 900f1b6598d1..687dff49f6e7 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday); */ static void change_clocksource(void) { - struct clocksource *new; + struct clocksource *new, *old; new = clocksource_get_next(); @@ -191,11 +191,16 @@ static void change_clocksource(void) clocksource_forward_now(); - new->raw_time = clock->raw_time; + if (clocksource_enable(new)) + return; + new->raw_time = clock->raw_time; + old = clock; clock = new; + clocksource_disable(old); + clock->cycle_last = 0; - clock->cycle_last = clocksource_read(new); + clock->cycle_last = clocksource_read(clock); clock->error = 0; clock->xtime_nsec = 0; clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); @@ -292,6 +297,7 @@ void __init timekeeping_init(void) ntp_init(); clock = clocksource_get_next(); + clocksource_enable(clock); clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); clock->cycle_last = clocksource_read(clock); -- cgit v1.2.3-55-g7522 From b48ccb095a0c9257241261ec2bd1cbb1bdabc48b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 23 Apr 2009 09:36:52 +0200 Subject: locking: clarify kernel-taint warning message Andi Kleen reported this message triggering on non-lockdep kernels: Disabling lockdep due to kernel taint Clarify the message to say 'lock debugging' - debug_locks_off() turns off all things lock debugging, not just lockdep. [ Impact: change kernel warning message text ] Reported-by: Andi Kleen Cc: Peter Zijlstra Cc: Andrew Morton Signed-off-by: Ingo Molnar --- kernel/panic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 934fb377f4b3..3dcaa1661357 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -221,7 +221,7 @@ void add_taint(unsigned flag) * post-warning case. 
*/ if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off()) - printk(KERN_WARNING "Disabling lockdep due to kernel taint\n"); + printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n"); set_bit(flag, &tainted_mask); } -- cgit v1.2.3-55-g7522 From 418df63c2d94f238ac7e1d1d53be35dd6b7a7252 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Wed, 22 Apr 2009 12:01:49 +0100 Subject: Delete slow-work timers properly Slow-work appears to delete its timer as soon as the first user unregisters, even though other users could be active. At the same time, it never seems to delete slow_work_oom_timer. Arrange for both to happen in the shutdown path. Signed-off-by: Jonathan Corbet Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- kernel/slow-work.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/slow-work.c b/kernel/slow-work.c index cf2bc01186ef..b28d19135f43 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -609,14 +609,14 @@ void slow_work_unregister_user(void) if (slow_work_user_count == 0) { printk(KERN_NOTICE "Slow work thread pool: Shutting down\n"); slow_work_threads_should_exit = true; + del_timer_sync(&slow_work_cull_timer); + del_timer_sync(&slow_work_oom_timer); wake_up_all(&slow_work_thread_wq); wait_for_completion(&slow_work_last_thread_exited); printk(KERN_NOTICE "Slow work thread pool:" " Shut down complete\n"); } - del_timer_sync(&slow_work_cull_timer); - mutex_unlock(&slow_work_user_lock); } EXPORT_SYMBOL(slow_work_unregister_user); -- cgit v1.2.3-55-g7522 From 0c8454f56623505a99463405fd7d5664adfbb094 Mon Sep 17 00:00:00 2001 From: Rafael J. Wysocki Date: Sat, 25 Apr 2009 00:16:06 +0200 Subject: PM/Hibernate: Fix waiting for image device to appear on resume Commit c751085943362143f84346d274e0011419c84202 ("PM/Hibernate: Wait for SCSI devices scan to complete during resume") added a call to scsi_complete_async_scans() to software_resume(), so that it waited for the SCSI scanning to complete, but the call was added at a wrong place. Namely, it should have been added after wait_for_device_probe(), which is called only if the image partition hasn't been specified yet. Also, it's reasonable to check if the image partition is present and only wait for the device probing and SCSI scanning to complete if it is not the case. Additionally, since noresume is checked right at the beginning of software_resume() and the function returns immediately if it's set, it doesn't make sense to check it once again later. Signed-off-by: Rafael J. Wysocki Signed-off-by: Linus Torvalds --- kernel/power/disk.c | 51 +++++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 0854770b63b9..e71ca9cd81b2 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -645,13 +645,6 @@ static int software_resume(void) if (noresume) return 0; - /* - * We can't depend on SCSI devices being available after loading one of - * their modules if scsi_complete_async_scans() is not called and the - * resume device usually is a SCSI one. - */ - scsi_complete_async_scans(); - /* * name_to_dev_t() below takes a sysfs buffer mutex when sysfs * is configured into the kernel. Since the regular hibernate @@ -663,32 +656,42 @@ static int software_resume(void) * here to avoid lockdep complaining. 
*/ mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING); + + if (swsusp_resume_device) + goto Check_image; + + if (!strlen(resume_file)) { + error = -ENOENT; + goto Unlock; + } + + pr_debug("PM: Checking image partition %s\n", resume_file); + + /* Check if the device is there */ + swsusp_resume_device = name_to_dev_t(resume_file); if (!swsusp_resume_device) { - if (!strlen(resume_file)) { - mutex_unlock(&pm_mutex); - return -ENOENT; - } /* * Some device discovery might still be in progress; we need * to wait for this to finish. */ wait_for_device_probe(); + /* + * We can't depend on SCSI devices being available after loading + * one of their modules until scsi_complete_async_scans() is + * called and the resume device usually is a SCSI one. + */ + scsi_complete_async_scans(); + swsusp_resume_device = name_to_dev_t(resume_file); - pr_debug("PM: Resume from partition %s\n", resume_file); - } else { - pr_debug("PM: Resume from partition %d:%d\n", - MAJOR(swsusp_resume_device), - MINOR(swsusp_resume_device)); + if (!swsusp_resume_device) { + error = -ENODEV; + goto Unlock; + } } - if (noresume) { - /** - * FIXME: If noresume is specified, we need to find the - * partition and reset it back to normal swap space. - */ - mutex_unlock(&pm_mutex); - return 0; - } + Check_image: + pr_debug("PM: Resume from partition %d:%d\n", + MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); pr_debug("PM: Checking hibernation image.\n"); error = swsusp_check(); -- cgit v1.2.3-55-g7522 From cad81bc2529ab8c62b6fdc83a1c0c7f4a87209eb Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 27 Apr 2009 01:41:34 +0200 Subject: ptrace: ptrace_attach: fix the usage of ->cred_exec_mutex ptrace_attach() needs task->cred_exec_mutex, not current->cred_exec_mutex. Signed-off-by: Oleg Nesterov Acked-by: Roland McGrath Acked-by: David Howells Signed-off-by: James Morris --- kernel/ptrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index dfcd83ceee3b..0692ab5a0d67 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task) /* Protect exec's credential calculations against our interference; * SUID, SGID and LSM creds get determined differently under ptrace. */ - retval = mutex_lock_interruptible(¤t->cred_exec_mutex); + retval = mutex_lock_interruptible(&task->cred_exec_mutex); if (retval < 0) goto out; @@ -232,7 +232,7 @@ repeat: bad: write_unlock_irqrestore(&tasklist_lock, flags); task_unlock(task); - mutex_unlock(¤t->cred_exec_mutex); + mutex_unlock(&task->cred_exec_mutex); out: return retval; } -- cgit v1.2.3-55-g7522
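The clocksource series above converts every read() callback from "cycle_t (*read)(void)" to "cycle_t (*read)(struct clocksource *cs)" so that a single callback can serve several clocksource instances. As a rough illustration of why the extra argument helps, here is a minimal out-of-tree sketch; it is not taken from any patch in this series, and the my_timer type and its fields are invented for the example. The shared callback recovers its per-instance state from the cs pointer with container_of().

	#include <linux/clocksource.h>
	#include <linux/io.h>
	#include <linux/kernel.h>

	/* Hypothetical driver state: each instance embeds its clocksource
	 * next to the MMIO counter register it reads. */
	struct my_timer {
		struct clocksource cs;	/* embedded, so container_of() works */
		void __iomem *counter;	/* per-instance counter register */
	};

	/* One read() callback shared by all my_timer instances; the cs
	 * argument identifies which instance is being read. */
	static cycle_t my_timer_read(struct clocksource *cs)
	{
		struct my_timer *t = container_of(cs, struct my_timer, cs);

		return (cycle_t)readl(t->counter);
	}

Before this change each instance needed its own read() function, or a global pointing at "the" instance; with the clocksource passed in, duplicated pairs like msm_gpt_read()/msm_dgt_read() above could in principle collapse into one function.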
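The enable()/disable() patch likewise leaves both hooks optional and NULL-safe: clocksource_enable() returns 0 when no hook is set, and clocksource_disable() is a no-op. A driver for gateable counter hardware might wire them up roughly as below. This is a sketch under the same invented my_timer type as the previous example, not code from the series; my_timer_hw_start() and my_timer_hw_stop() are hypothetical helpers standing in for the real power-management calls.

	/* Hypothetical hardware helpers, assumed provided elsewhere. */
	extern int my_timer_hw_start(struct my_timer *t);
	extern void my_timer_hw_stop(struct my_timer *t);

	/* Called via clocksource_enable() before timekeeping switches to
	 * this clocksource: power the counter up and report success. */
	static int my_timer_enable(struct clocksource *cs)
	{
		struct my_timer *t = container_of(cs, struct my_timer, cs);

		return my_timer_hw_start(t);	/* 0 on success, -errno on failure */
	}

	/* Called via clocksource_disable() after timekeeping has moved to
	 * another clocksource: the counter is unused, so save power. */
	static void my_timer_disable(struct clocksource *cs)
	{
		struct my_timer *t = container_of(cs, struct my_timer, cs);

		my_timer_hw_stop(t);
	}

Note how change_clocksource() in the patch enables the new source and bails out if enable() fails before touching the current clock, so a clocksource whose hardware cannot be powered up is never installed.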