path: root/drivers/cpufreq/cpufreq_governor.c
author      Rafael J. Wysocki    2016-02-08 23:41:10 +0100
committer   Rafael J. Wysocki    2016-03-09 14:40:57 +0100
commit      686cc637c99324ad52a6f8e59181f6407405bfe2 (patch)
tree        dceaa755823bcef404f0d91ce4fcc26a005a6b74 /drivers/cpufreq/cpufreq_governor.c
parent      cpufreq: governor: Symmetrize cpu_dbs_info initialization and cleanup (diff)
cpufreq: governor: Rename skip_work to work_count
The skip_work field in struct policy_dbs_info technically is a counter, so give it a new name to reflect that.

No functional changes.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
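For context, a hedged sketch of the field being renamed. The actual declaration lives in drivers/cpufreq/cpufreq_governor.h, which is outside the diffstat below, so the surrounding members shown here are illustrative only; a standalone sketch of the gating pattern built on this counter follows the diff at the end of the page.

struct policy_dbs_info {
	struct cpufreq_policy *policy;
	struct mutex timer_mutex;
	u64 last_sample_time;
	s64 sample_delay_ns;
	atomic_t work_count;		/* was: skip_work; counts queued/in-flight work */
	struct irq_work irq_work;
	struct work_struct work;
	/* ... */
};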
Diffstat (limited to 'drivers/cpufreq/cpufreq_governor.c')
-rw-r--r--   drivers/cpufreq/cpufreq_governor.c   14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 7c08d8360f72..298be52adea0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -196,16 +196,16 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
static void gov_cancel_work(struct policy_dbs_info *policy_dbs)
{
/* Tell dbs_update_util_handler() to skip queuing up work items. */
- atomic_inc(&policy_dbs->skip_work);
+ atomic_inc(&policy_dbs->work_count);
/*
* If dbs_update_util_handler() is already running, it may not notice
- * the incremented skip_work, so wait for it to complete to prevent its
+ * the incremented work_count, so wait for it to complete to prevent its
* work item from being queued up after the cancel_work_sync() below.
*/
gov_clear_update_util(policy_dbs->policy);
irq_work_sync(&policy_dbs->irq_work);
cancel_work_sync(&policy_dbs->work);
- atomic_set(&policy_dbs->skip_work, 0);
+ atomic_set(&policy_dbs->work_count, 0);
}
static void dbs_work_handler(struct work_struct *work)
@@ -234,7 +234,7 @@ static void dbs_work_handler(struct work_struct *work)
* up using a stale sample delay value.
*/
smp_mb__before_atomic();
- atomic_dec(&policy_dbs->skip_work);
+ atomic_dec(&policy_dbs->work_count);
}
static void dbs_irq_work(struct irq_work *irq_work)
@@ -267,7 +267,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
* - The governor is being stopped.
* - It is too early (too little time from the previous sample).
*/
- if (atomic_inc_return(&policy_dbs->skip_work) == 1) {
+ if (atomic_inc_return(&policy_dbs->work_count) == 1) {
u64 delta_ns;
delta_ns = time - policy_dbs->last_sample_time;
@@ -277,7 +277,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
return;
}
}
- atomic_dec(&policy_dbs->skip_work);
+ atomic_dec(&policy_dbs->work_count);
}
static void set_sampling_rate(struct dbs_data *dbs_data,
@@ -305,7 +305,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
return NULL;
mutex_init(&policy_dbs->timer_mutex);
- atomic_set(&policy_dbs->skip_work, 0);
+ atomic_set(&policy_dbs->work_count, 0);
init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
INIT_WORK(&policy_dbs->work, dbs_work_handler);
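For readers skimming the diff: the renamed counter implements a simple gate. Below is a minimal userspace sketch of the same pattern, using C11 <stdatomic.h> rather than the kernel's atomic_t; names such as queue_sample_work() and update_handler() are hypothetical stand-ins, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int work_count;

/* Hypothetical stand-in for queueing the governor's work item. */
static void queue_sample_work(void)
{
	printf("work queued\n");
}

/*
 * Analogue of dbs_update_util_handler(): only the caller that raises the
 * count from 0 to 1 may queue work; every other caller backs off.
 */
static void update_handler(bool sample_due)
{
	if (atomic_fetch_add(&work_count, 1) == 0) {	/* like atomic_inc_return() == 1 */
		if (sample_due) {
			queue_sample_work();
			return;		/* keep the count elevated until the work runs */
		}
	}
	atomic_fetch_sub(&work_count, 1);	/* too early or already pending: back off */
}

/* Analogue of dbs_work_handler(): drops the reference taken above. */
static void work_handler(void)
{
	atomic_fetch_sub(&work_count, 1);
}

int main(void)
{
	update_handler(true);	/* 0 -> 1: queues work */
	update_handler(true);	/* 1 -> 2 -> 1: backs off */
	work_handler();		/* 1 -> 0: ready for the next sample */
	return 0;
}

The reason a counter is used rather than a plain flag is visible in gov_cancel_work() above: it also increments work_count to block further queuing while it synchronizes the irq_work and cancels any outstanding work item, then resets the count to zero.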