author     Linus Torvalds  2015-06-23 03:57:44 +0200
committer  Linus Torvalds  2015-06-23 03:57:44 +0200
commit     43224b96af3154cedd7220f7b90094905f07ac78 (patch)
tree       44279acc4613b314ff031620fd62641db3c85b71 /include
parent     Merge branch 'x86-core-for-linus' of git://git.kernel.org/pub/scm/linux/kerne... (diff)
parent     clocksource: Increase dependencies of timer-stm32 to limit build wreckage (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:
 "A rather largish update for everything time and timer related:

   - Cache footprint optimizations for both hrtimers and timer wheel

   - Lower the NOHZ impact on systems which have NOHZ or timer migration
     disabled at runtime.

   - Optimize run time overhead of hrtimer interrupt by making the clock
     offset updates smarter

   - hrtimer cleanups and removal of restrictions to tackle some problems
     in sched/perf

   - Some more leap second tweaks

   - Another round of changes addressing the 2038 problem

   - First step to change the internals of clock event devices by
     introducing the necessary infrastructure

   - Allow constant folding for usecs/msecs_to_jiffies()

   - The usual pile of clockevent/clocksource driver updates

  The hrtimer changes contain updates to sched, perf and x86 as they
  depend on them plus changes all over the tree to cleanup API changes
  and redundant code, which got copied all over the place.  The y2038
  changes touch s390 to remove the last non 2038 safe code related to
  boot/persistant clock"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
  clocksource: Increase dependencies of timer-stm32 to limit build wreckage
  timer: Minimize nohz off overhead
  timer: Reduce timer migration overhead if disabled
  timer: Stats: Simplify the flags handling
  timer: Replace timer base by a cpu index
  timer: Use hlist for the timer wheel hash buckets
  timer: Remove FIFO "guarantee"
  timers: Sanitize catchup_timer_jiffies() usage
  hrtimer: Allow hrtimer::function() to free the timer
  seqcount: Introduce raw_write_seqcount_barrier()
  seqcount: Rename write_seqcount_barrier()
  hrtimer: Fix hrtimer_is_queued() hole
  hrtimer: Remove HRTIMER_STATE_MIGRATE
  selftest: Timers: Avoid signal deadlock in leap-a-day
  timekeeping: Copy the shadow-timekeeper over the real timekeeper last
  clockevents: Check state instead of mode in suspend/resume path
  selftests: timers: Add leap-second timer edge testing to leap-a-day.c
  ntp: Do leapsecond adjustment in adjtimex read path
  time: Prevent early expiry of hrtimers[CLOCK_REALTIME] at the leap second edge
  ntp: Introduce and use SECS_PER_DAY macro instead of 86400
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/alarmtimer.h            4
-rw-r--r--  include/linux/clockchips.h           37
-rw-r--r--  include/linux/clocksource.h           1
-rw-r--r--  include/linux/hrtimer.h             167
-rw-r--r--  include/linux/interrupt.h             9
-rw-r--r--  include/linux/jiffies.h             130
-rw-r--r--  include/linux/perf_event.h            4
-rw-r--r--  include/linux/rcupdate.h              6
-rw-r--r--  include/linux/rcutree.h               2
-rw-r--r--  include/linux/sched.h                 6
-rw-r--r--  include/linux/sched/sysctl.h         12
-rw-r--r--  include/linux/seqlock.h              47
-rw-r--r--  include/linux/time64.h                2
-rw-r--r--  include/linux/timekeeper_internal.h  19
-rw-r--r--  include/linux/timekeeping.h           2
-rw-r--r--  include/linux/timer.h                63
-rw-r--r--  include/linux/timerqueue.h            8
-rw-r--r--  include/trace/events/timer.h         12
18 files changed, 375 insertions, 156 deletions
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index a899402a5a0e..52f3b7da4f2d 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -43,8 +43,8 @@ struct alarm {
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
int alarm_try_to_cancel(struct alarm *alarm);
int alarm_cancel(struct alarm *alarm);
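With this change alarm_start() and alarm_start_relative() return void, so callers can no longer check a return value when arming an alarm. A minimal sketch of a hypothetical caller (the names are illustrative and not part of the patch):

#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static struct alarm my_alarm;

static enum alarmtimer_restart my_alarm_fn(struct alarm *a, ktime_t now)
{
        /* handle the expiry */
        return ALARMTIMER_NORESTART;
}

static void my_arm_alarm(void)
{
        alarm_init(&my_alarm, ALARM_REALTIME, my_alarm_fn);
        /* Previously: int ret = alarm_start_relative(...); now fire-and-forget. */
        alarm_start_relative(&my_alarm, ms_to_ktime(500));
}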
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 96c280b2c263..597a1e836f22 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -37,12 +37,15 @@ enum clock_event_mode {
* reached from DETACHED or SHUTDOWN.
* ONESHOT: Device is programmed to generate event only once. Can be reached
* from DETACHED or SHUTDOWN.
+ * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
+ * stopped.
*/
enum clock_event_state {
CLOCK_EVT_STATE_DETACHED,
CLOCK_EVT_STATE_SHUTDOWN,
CLOCK_EVT_STATE_PERIODIC,
CLOCK_EVT_STATE_ONESHOT,
+ CLOCK_EVT_STATE_ONESHOT_STOPPED,
};
/*
@@ -84,12 +87,13 @@ enum clock_event_state {
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
* @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
- * @state: current state of the device, assigned by the core code
+ * @state_use_accessors:current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
* @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
* @set_state_periodic: switch state to periodic, if !set_mode
* @set_state_oneshot: switch state to oneshot, if !set_mode
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
* @set_state_shutdown: switch state to shutdown, if !set_mode
* @tick_resume: resume clkevt device, if !set_mode
* @broadcast: function to broadcast events
@@ -113,7 +117,7 @@ struct clock_event_device {
u32 mult;
u32 shift;
enum clock_event_mode mode;
- enum clock_event_state state;
+ enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
@@ -121,11 +125,12 @@ struct clock_event_device {
* State transition callback(s): Only one of the two groups should be
* defined:
* - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
- * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
+ * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
*/
void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
int (*set_state_periodic)(struct clock_event_device *);
int (*set_state_oneshot)(struct clock_event_device *);
+ int (*set_state_oneshot_stopped)(struct clock_event_device *);
int (*set_state_shutdown)(struct clock_event_device *);
int (*tick_resume)(struct clock_event_device *);
@@ -144,6 +149,32 @@ struct clock_event_device {
struct module *owner;
} ____cacheline_aligned;
+/* Helpers to verify state of a clockevent device */
+static inline bool clockevent_state_detached(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
+}
+
+static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
+}
+
+static inline bool clockevent_state_periodic(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
+}
+
+static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
+}
+
+static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
+}
+
/*
* Calculate a multiplication factor for scaled math, which is used to convert
* nanoseconds based values to clock ticks:
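The new clockevent_state_*() helpers are the intended way to query a device's state instead of reading the (now renamed) state field directly. A hedged sketch of how a hypothetical clockevent driver's interrupt handler might use them; the foo_* names are illustrative only:

#include <linux/clockchips.h>
#include <linux/interrupt.h>

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        /* acknowledge the hardware interrupt here */

        /* Deliver the event only while the core has the device active;
         * in SHUTDOWN or ONESHOT_STOPPED state there is nobody to call. */
        if (clockevent_state_oneshot(evt) || clockevent_state_periodic(evt))
                evt->event_handler(evt);

        return IRQ_HANDLED;
}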
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d27d0152271f..278dd279a7a8 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -181,7 +181,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 05f6df1fdf5b..76dd4f0da5ca 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -53,34 +53,25 @@ enum hrtimer_restart {
*
* 0x00 inactive
* 0x01 enqueued into rbtree
- * 0x02 callback function running
- * 0x04 timer is migrated to another cpu
*
- * Special cases:
- * 0x03 callback function running and enqueued
- * (was requeued on another CPU)
- * 0x05 timer was migrated on CPU hotunplug
+ * The callback state is not part of the timer->state because clearing it would
+ * mean touching the timer after the callback, this makes it impossible to free
+ * the timer from the callback function.
*
- * The "callback function running and enqueued" status is only possible on
- * SMP. It happens for example when a posix timer expired and the callback
+ * Therefore we track the callback state in:
+ *
+ * timer->base->cpu_base->running == timer
+ *
+ * On SMP it is possible to have a "callback function running and enqueued"
+ * status. It happens for example when a posix timer expired and the callback
* queued a signal. Between dropping the lock which protects the posix timer
* and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer. We have to preserve the callback running state,
- * as otherwise the timer could be removed before the softirq code finishes the
- * the handling of the timer.
- *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
- * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
- * also affects HRTIMER_STATE_MIGRATE where the preservation is not
- * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
- * enqueued on the new cpu.
+ * signal and rearm the timer.
*
* All state transitions are protected by cpu_base->lock.
*/
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
-#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_MIGRATE 0x04
/**
* struct hrtimer - the basic hrtimer structure
@@ -130,6 +121,12 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
+#ifdef CONFIG_64BIT
+# define HRTIMER_CLOCK_BASE_ALIGN 64
+#else
+# define HRTIMER_CLOCK_BASE_ALIGN 32
+#endif
+
/**
* struct hrtimer_clock_base - the timer base for a specific clock
* @cpu_base: per cpu clock base
@@ -137,9 +134,7 @@ struct hrtimer_sleeper {
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @active: red black tree root node for the active timers
- * @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
- * @softirq_time: the time when running the hrtimer queue in the softirq
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
@@ -147,11 +142,9 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
- ktime_t resolution;
ktime_t (*get_time)(void);
- ktime_t softirq_time;
ktime_t offset;
-};
+} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
@@ -165,11 +158,16 @@ enum hrtimer_base_type {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set: Indicates that clock was set from irq context.
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active: The nohz functionality is enabled
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
+ * @next_timer: Pointer to the first expiring timer
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
* @hang_detected: The last hrtimer interrupt detected a hang
@@ -178,27 +176,38 @@ enum hrtimer_base_type {
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
+ seqcount_t seq;
+ struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
- unsigned int clock_was_set;
+ unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+ hres_active : 1,
+ hang_detected : 1;
ktime_t expires_next;
- int in_hrtirq;
- int hres_active;
- int hang_detected;
- unsigned long nr_events;
- unsigned long nr_retries;
- unsigned long nr_hangs;
- ktime_t max_hang_time;
+ struct hrtimer *next_timer;
+ unsigned int nr_events;
+ unsigned int nr_retries;
+ unsigned int nr_hangs;
+ unsigned int max_hang_time;
#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-};
+} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
+ BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -262,19 +271,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
return ktime_sub(timer->node.expires, timer->base->get_time());
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
-
-/*
- * In high resolution mode the time reference must be read accurate
- */
static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
return timer->base->get_time();
}
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return timer->base->cpu_base->hres_active;
@@ -295,21 +301,16 @@ extern void hrtimer_peek_ahead_timers(void);
extern void clock_was_set_delayed(void);
+extern unsigned int hrtimer_resolution;
+
#else
# define MONOTONIC_RES_NSEC LOW_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-static inline void hrtimer_peek_ahead_timers(void) { }
+#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-/*
- * In non high resolution mode the time reference is taken from
- * the base softirq time variable.
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
-{
- return timer->base->softirq_time;
-}
+static inline void hrtimer_peek_ahead_timers(void) { }
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
@@ -353,49 +354,47 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
/* Basic timer operations: */
-extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode);
-extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long range_ns, const enum hrtimer_mode mode);
-extern int
-__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- unsigned long delta_ns,
- const enum hrtimer_mode mode, int wakeup);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL)
+ */
+static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ hrtimer_start_range_ns(timer, tim, 0, mode);
+}
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
-static inline int hrtimer_start_expires(struct hrtimer *timer,
- enum hrtimer_mode mode)
+static inline void hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
unsigned long delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
- return hrtimer_start_range_ns(timer, soft, delta, mode);
+ hrtimer_start_range_ns(timer, soft, delta, mode);
}
-static inline int hrtimer_restart(struct hrtimer *timer)
+static inline void hrtimer_restart(struct hrtimer *timer)
{
- return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
-extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-extern ktime_t hrtimer_get_next_event(void);
+extern u64 hrtimer_get_next_event(void);
-/*
- * A timer is active, when it is enqueued into the rbtree or the
- * callback function is running or it's in the state of being migrated
- * to another cpu.
- */
-static inline int hrtimer_active(const struct hrtimer *timer)
-{
- return timer->state != HRTIMER_STATE_INACTIVE;
-}
+extern bool hrtimer_active(const struct hrtimer *timer);
/*
* Helper function to check, whether the timer is on one of the queues
@@ -411,14 +410,29 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_CALLBACK;
+ return timer->base->cpu_base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
-/* Forward a hrtimer so it expires after the hrtimer's current now */
+/**
+ * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * @timer: hrtimer to forward
+ * @interval: the interval to forward
+ *
+ * Forward the timer expiry so it will expire after the current time
+ * of the hrtimer clock base. Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts @timer must neither be enqueued nor
+ * running the callback and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
+ */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
@@ -443,7 +457,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
-extern void hrtimer_run_pending(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
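hrtimer_start() is now a void inline wrapper around hrtimer_start_range_ns(), and the "callback running" state lives in cpu_base->running instead of timer->state. A minimal sketch, assuming a hypothetical driver with a 10 ms periodic tick, of how the reworked API is used:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer foo_tick;
static ktime_t foo_period;

static enum hrtimer_restart foo_tick_fn(struct hrtimer *t)
{
        /* do the periodic work here */
        hrtimer_forward_now(t, foo_period);   /* only moves the expiry forward */
        return HRTIMER_RESTART;               /* the core requeues at the new expiry */
}

static void foo_tick_setup(void)
{
        foo_period = ms_to_ktime(10);
        hrtimer_init(&foo_tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        foo_tick.function = foo_tick_fn;
        hrtimer_start(&foo_tick, foo_period, HRTIMER_MODE_REL);  /* returns void now */
}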
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 950ae4501826..be7e75c945e9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -413,7 +413,8 @@ enum
BLOCK_IOPOLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ,
+ HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
+ numbering. Sigh! */
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
NR_SOFTIRQS
@@ -592,10 +593,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
clockid_t which_clock, enum hrtimer_mode mode);
static inline
-int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
+void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+ const enum hrtimer_mode mode)
{
- return hrtimer_start(&ttimer->timer, time, mode);
+ hrtimer_start(&ttimer->timer, time, mode);
}
static inline
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c367cbdf73ab..535fd3bb1ba8 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/param.h> /* for HZ */
+#include <generated/timeconst.h>
/*
* The following defines establish the engineering parameters of the PLL
@@ -288,8 +289,133 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
-extern unsigned long msecs_to_jiffies(const unsigned int m);
-extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long __msecs_to_jiffies(const unsigned int m);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+/*
+ * HZ is equal to or smaller than 1000, and 1000 is a nice round
+ * multiple of HZ, divide with the factor between them, but round
+ * upwards:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+}
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+/*
+ * HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
+ * simply multiply with the factor between them.
+ *
+ * But first make sure the multiplication result cannot overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return m * (HZ / MSEC_PER_SEC);
+}
+#else
+/*
+ * Generic case - multiply, round and divide. But first check that if
+ * we are doing a net multiplication, that we wouldn't overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+
+ return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
+}
+#endif
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * conversion is done as follows:
+ *
+ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it with a factor and
+ * handling any 32-bit overflows.
+ * for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * directly here and from __msecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+ if (__builtin_constant_p(m)) {
+ if ((int)m < 0)
+ return MAX_JIFFY_OFFSET;
+ return _msecs_to_jiffies(m);
+ } else {
+ return __msecs_to_jiffies(m);
+ }
+}
+
+extern unsigned long __usecs_to_jiffies(const unsigned int u);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+}
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return u * (HZ / USEC_PER_SEC);
+}
+#else
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+ >> USEC_TO_HZ_SHR32;
+}
+#endif
+
+/**
+ * usecs_to_jiffies: - convert microseconds to jiffies
+ * @u: time in microseconds
+ *
+ * conversion is done as follows:
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it with a factor and
+ * handling any 32-bit overflows as for msecs_to_jiffies.
+ *
+ * usecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __usecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * directly here and from __usecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long usecs_to_jiffies(const unsigned int u)
+{
+ if (__builtin_constant_p(u)) {
+ if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return _usecs_to_jiffies(u);
+ } else {
+ return __usecs_to_jiffies(u);
+ }
+}
+
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
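msecs_to_jiffies() and usecs_to_jiffies() are now inline wrappers that test __builtin_constant_p(): constant arguments are converted at compile time via the HZ-specific _msecs_to_jiffies()/_usecs_to_jiffies() helpers, while runtime values fall back to the out-of-line __msecs_to_jiffies()/__usecs_to_jiffies(). An illustrative, hypothetical call site showing both paths:

#include <linux/jiffies.h>
#include <linux/sched.h>

static void foo_wait(unsigned int user_ms)
{
        /* Constant argument: __builtin_constant_p(100) is true, so this
         * folds to a precomputed jiffies value, no function call emitted. */
        unsigned long fixed = msecs_to_jiffies(100);

        /* Runtime argument: falls back to the out-of-line __msecs_to_jiffies(). */
        unsigned long dynamic = msecs_to_jiffies(user_ms);

        schedule_timeout_interruptible(fixed + dynamic);
}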
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a204d5266f5f..1b82d44b0a02 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -562,8 +562,12 @@ struct perf_cpu_context {
struct perf_event_context *task_ctx;
int active_oncpu;
int exclusive;
+
+ raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
+ unsigned int hrtimer_active;
+
struct pmu *unique_pmu;
struct perf_cgroup *cgrp;
};
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 03a899aabd17..33a056bb886f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,6 +44,8 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/ktime.h>
+
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
@@ -1100,9 +1102,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
#ifdef CONFIG_TINY_RCU
-static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
- *delta_jiffies = ULONG_MAX;
+ *nextevt = KTIME_MAX;
return 0;
}
#endif /* #ifdef CONFIG_TINY_RCU */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 3fa4a43ab415..456879143f89 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -31,7 +31,7 @@
#define __LINUX_RCUTREE_H
void rcu_note_context_switch(void);
-int rcu_needs_cpu(unsigned long *delta_jiffies);
+int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d4193d5613cf..30364cb58b1f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -345,14 +345,10 @@ extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(int pinned);
+extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
-static inline int get_nohz_timer_target(int pinned)
-{
- return smp_processor_id();
-}
#endif
/*
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 596a0e007c62..c9e4731cf10b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return 1;
-}
-#endif
/*
* control realtime throttling:
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5f68d0a391ce..486e685a226a 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -233,6 +233,47 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
s->sequence++;
}
+/**
+ * raw_write_seqcount_barrier - do a seq write barrier
+ * @s: pointer to seqcount_t
+ *
+ * This can be used to provide an ordering guarantee instead of the
+ * usual consistency guarantee. It is one wmb cheaper, because we can
+ * collapse the two back-to-back wmb()s.
+ *
+ * seqcount_t seq;
+ * bool X = true, Y = false;
+ *
+ * void read(void)
+ * {
+ * bool x, y;
+ *
+ * do {
+ * int s = read_seqcount_begin(&seq);
+ *
+ * x = X; y = Y;
+ *
+ * } while (read_seqcount_retry(&seq, s));
+ *
+ * BUG_ON(!x && !y);
+ * }
+ *
+ * void write(void)
+ * {
+ * Y = true;
+ *
+ * raw_write_seqcount_barrier(&seq);
+ *
+ * X = false;
+ * }
+ */
+static inline void raw_write_seqcount_barrier(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+ s->sequence++;
+}
+
/*
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
@@ -266,13 +307,13 @@ static inline void write_seqcount_end(seqcount_t *s)
}
/**
- * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * write_seqcount_invalidate - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
- * After write_seqcount_barrier, no read-side seq operations will complete
+ * After write_seqcount_invalidate, no read-side seq operations will complete
* successfully and see data older than this.
*/
-static inline void write_seqcount_barrier(seqcount_t *s)
+static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
diff --git a/include/linux/time64.h b/include/linux/time64.h
index a3831478d9cf..77b5df2acd2a 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -2,6 +2,7 @@
#define _LINUX_TIME64_H
#include <uapi/linux/time.h>
+#include <linux/math64.h>
typedef __s64 time64_t;
@@ -28,6 +29,7 @@ struct timespec64 {
#define FSEC_PER_SEC 1000000000000000LL
/* Located here for timespec[64]_valid_strict */
+#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index fb86963859c7..25247220b4b7 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -49,6 +49,8 @@ struct tk_read_base {
* @offs_boot: Offset clock monotonic -> clock boottime
* @offs_tai: Offset clock monotonic -> clock tai
* @tai_offset: The current UTC to TAI offset in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
* @raw_time: Monotonic raw base time in timespec64 format
* @cycle_interval: Number of clock cycles in one NTP interval
* @xtime_interval: Number of clock shifted nano seconds in one NTP
@@ -60,6 +62,9 @@ struct tk_read_base {
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
* ntp shifted nano seconds.
+ * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
+ * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
+ * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
* we need to add to xtime (or xtime corrected for sub jiffie times)
@@ -85,6 +90,8 @@ struct timekeeper {
ktime_t offs_boot;
ktime_t offs_tai;
s32 tai_offset;
+ unsigned int clock_was_set_seq;
+ ktime_t next_leap_ktime;
struct timespec64 raw_time;
/* The following members are for timekeeping internal use */
@@ -104,6 +111,18 @@ struct timekeeper {
s64 ntp_error;
u32 ntp_error_shift;
u32 ntp_err_mult;
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+ long last_warning;
+ /*
+ * These simple flag variables are managed
+ * without locks, which is racy, but they are
+ * ok since we don't really care about being
+ * super precise about how many events were
+ * seen, just that a problem was observed.
+ */
+ int underflow_seen;
+ int overflow_seen;
+#endif
};
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 99176af216af..3aa72e648650 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -163,6 +163,7 @@ extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
+extern u32 ktime_get_resolution_ns(void);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
@@ -266,7 +267,6 @@ extern int persistent_clock_is_local;
extern void read_persistent_clock(struct timespec *ts);
extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock(struct timespec *ts);
extern void read_boot_clock64(struct timespec64 *ts);
extern int update_persistent_clock(struct timespec now);
extern int update_persistent_clock64(struct timespec64 now);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 8c5a197e1587..61aa61dc410c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -14,27 +14,23 @@ struct timer_list {
* All fields that change during normal runtime grouped to the
* same cacheline
*/
- struct list_head entry;
- unsigned long expires;
- struct tvec_base *base;
-
- void (*function)(unsigned long);
- unsigned long data;
-
- int slack;
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(unsigned long);
+ unsigned long data;
+ u32 flags;
+ int slack;
#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
+ struct lockdep_map lockdep_map;
#endif
};
-extern struct tvec_base boot_tvec_bases;
-
#ifdef CONFIG_LOCKDEP
/*
* NB: because we have to copy the lockdep_map, setting the lockdep_map key
@@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases;
#endif
/*
- * Note that all tvec_bases are at least 4 byte aligned and lower two bits
- * of base in timer_list is guaranteed to be zero. Use them for flags.
- *
* A deferrable timer will work normally when the system is busy, but
* will not cause a CPU to come out of idle just to service it; instead,
* the timer will be serviced when the CPU eventually wakes up with a
@@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases;
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*/
-#define TIMER_DEFERRABLE 0x1LU
-#define TIMER_IRQSAFE 0x2LU
-
-#define TIMER_FLAG_MASK 0x3LU
+#define TIMER_CPUMASK 0x0007FFFF
+#define TIMER_MIGRATING 0x00080000
+#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
+#define TIMER_DEFERRABLE 0x00100000
+#define TIMER_IRQSAFE 0x00200000
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .prev = TIMER_ENTRY_STATIC }, \
+ .entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.expires = (_expires), \
.data = (_data), \
- .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+ .flags = (_flags), \
.slack = -1, \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
@@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.next != NULL;
+ return timer->entry.pprev != NULL;
}
extern void add_timer_on(struct timer_list *timer, int cpu);
@@ -188,26 +182,16 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
/*
- * Return when the next timer-wheel timeout occurs (in absolute jiffies),
- * locks the timer base and does the comparison against the given
- * jiffie.
- */
-extern unsigned long get_next_timer_interrupt(unsigned long now);
-
-/*
* Timer-statistics info:
*/
#ifdef CONFIG_TIMER_STATS
extern int timer_stats_active;
-#define TIMER_STATS_FLAG_DEFERRABLE 0x1
-
extern void init_timer_stats(void);
extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
+ void *timerf, char *comm, u32 flags);
extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
void *addr);
@@ -254,6 +238,15 @@ extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#include <linux/sysctl.h>
+
+extern unsigned int sysctl_timer_migration;
+int timer_migration_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
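struct timer_list no longer carries a tvec_base pointer; the base CPU plus the MIGRATING/DEFERRABLE/IRQSAFE attributes are packed into the new u32 flags word using the masks added above. A sketch of how that layout could be decoded (the masks come from the patch, the helpers themselves are hypothetical):

#include <linux/timer.h>
#include <linux/types.h>

static unsigned int foo_timer_cpu(const struct timer_list *timer)
{
        /* The low TIMER_CPUMASK bits carry the CPU the timer is queued on. */
        return timer->flags & TIMER_CPUMASK;
}

static bool foo_timer_is_deferrable(const struct timer_list *timer)
{
        return !!(timer->flags & TIMER_DEFERRABLE);
}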
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index a520fd70a59f..7eec17ad7fa1 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -16,10 +16,10 @@ struct timerqueue_head {
};
-extern void timerqueue_add(struct timerqueue_head *head,
- struct timerqueue_node *node);
-extern void timerqueue_del(struct timerqueue_head *head,
- struct timerqueue_node *node);
+extern bool timerqueue_add(struct timerqueue_head *head,
+ struct timerqueue_node *node);
+extern bool timerqueue_del(struct timerqueue_head *head,
+ struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
struct timerqueue_node *node);
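timerqueue_add() and timerqueue_del() now return bool. As used by the timer rework, the add return value indicates whether the inserted node became the new earliest-expiring entry, so the caller knows the next hardware event has to be reprogrammed. A hedged sketch of a hypothetical caller built on that assumption:

#include <linux/timerqueue.h>

static void foo_enqueue(struct timerqueue_head *head,
                        struct timerqueue_node *node)
{
        if (timerqueue_add(head, node)) {
                /* node is the new head of the queue:
                 * reprogram the next-event hardware here */
        }
}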
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 68c2c2000f02..073b9ac245ba 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -43,15 +43,18 @@ DEFINE_EVENT(timer_class, timer_init,
*/
TRACE_EVENT(timer_start,
- TP_PROTO(struct timer_list *timer, unsigned long expires),
+ TP_PROTO(struct timer_list *timer,
+ unsigned long expires,
+ unsigned int flags),
- TP_ARGS(timer, expires),
+ TP_ARGS(timer, expires, flags),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
__field( unsigned long, now )
+ __field( unsigned int, flags )
),
TP_fast_assign(
@@ -59,11 +62,12 @@ TRACE_EVENT(timer_start,
__entry->function = timer->function;
__entry->expires = expires;
__entry->now = jiffies;
+ __entry->flags = flags;
),
- TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
__entry->timer, __entry->function, __entry->expires,
- (long)__entry->expires - __entry->now)
+ (long)__entry->expires - __entry->now, __entry->flags)
);
/**