Diffstat (limited to 'kernel/time/ntp.c')
-rw-r--r--  kernel/time/ntp.c  227
1 file changed, 113 insertions, 114 deletions
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 99e03bec68e4..8d70da1b9a0d 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -493,6 +493,67 @@ out:
return leap;
}
+static void sync_hw_clock(struct work_struct *work);
+static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock);
+
+static void sched_sync_hw_clock(struct timespec64 now,
+ unsigned long target_nsec, bool fail)
+
+{
+ struct timespec64 next;
+
+ getnstimeofday64(&next);
+ if (!fail)
+ next.tv_sec = 659;
+ else {
+ /*
+ * Try again as soon as possible. Delaying long periods
+ * decreases the accuracy of the work queue timer. Due to this
+ * the algorithm is very likely to require a short-sleep retry
+ * after the above long sleep to synchronize ts_nsec.
+ */
+ next.tv_sec = 0;
+ }
+
+ /* Compute the needed delay that will get to tv_nsec == target_nsec */
+ next.tv_nsec = target_nsec - next.tv_nsec;
+ if (next.tv_nsec <= 0)
+ next.tv_nsec += NSEC_PER_SEC;
+ if (next.tv_nsec >= NSEC_PER_SEC) {
+ next.tv_sec++;
+ next.tv_nsec -= NSEC_PER_SEC;
+ }
+
+ queue_delayed_work(system_power_efficient_wq, &sync_work,
+ timespec64_to_jiffies(&next));
+}
+
+static void sync_rtc_clock(void)
+{
+ unsigned long target_nsec;
+ struct timespec64 adjust, now;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
+ return;
+
+ getnstimeofday64(&now);
+
+ adjust = now;
+ if (persistent_clock_is_local)
+ adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
+
+ /*
+ * The current RTC in use will provide the target_nsec it wants to be
+ * called at, and does rtc_tv_nsec_ok internally.
+ */
+ rc = rtc_set_ntp_time(adjust, &target_nsec);
+ if (rc == -ENODEV)
+ return;
+
+ sched_sync_hw_clock(now, target_nsec, rc);
+}
+
#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock(struct timespec now)
{
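
The delay arithmetic in sched_sync_hw_clock() above can be checked in isolation: starting from the current wall-clock time, it picks either the ~11 minute period (659 s) or an immediate retry, then shifts the expiry so the work fires when tv_nsec equals target_nsec. Below is a minimal userspace sketch of that calculation under stated assumptions: clock_gettime(CLOCK_REALTIME, ...) stands in for getnstimeofday64(), and compute_sync_delay() is an illustrative helper name, not a kernel function.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static struct timespec compute_sync_delay(long target_nsec, bool fail)
{
	struct timespec next;

	/* Start from the current wall-clock time, as getnstimeofday64() does. */
	clock_gettime(CLOCK_REALTIME, &next);

	/* Success: sleep ~11 minutes.  Failure: retry almost immediately. */
	next.tv_sec = fail ? 0 : 659;

	/* Shift the expiry so that, when it fires, tv_nsec == target_nsec. */
	next.tv_nsec = target_nsec - next.tv_nsec;
	if (next.tv_nsec <= 0)
		next.tv_nsec += NSEC_PER_SEC;
	if (next.tv_nsec >= NSEC_PER_SEC) {
		next.tv_sec++;
		next.tv_nsec -= NSEC_PER_SEC;
	}
	return next;
}

int main(void)
{
	/* The CMOS path aims for the middle of the second. */
	struct timespec d = compute_sync_delay(NSEC_PER_SEC / 2, false);

	printf("re-arm the sync work in %ld.%09ld s\n",
	       (long)d.tv_sec, d.tv_nsec);
	return 0;
}
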
@@ -508,76 +569,75 @@ int __weak update_persistent_clock64(struct timespec64 now64)
}
#endif
-#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
-static void sync_cmos_clock(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
-
-static void sync_cmos_clock(struct work_struct *work)
+static bool sync_cmos_clock(void)
{
+ static bool no_cmos;
struct timespec64 now;
- struct timespec64 next;
- int fail = 1;
+ struct timespec64 adjust;
+ int rc = -EPROTO;
+ long target_nsec = NSEC_PER_SEC / 2;
+
+ if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE))
+ return false;
+
+ if (no_cmos)
+ return false;
/*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- * This code is run on a timer. If the clock is set, that timer
- * may not expire at the correct time. Thus, we adjust...
- * We want the clock to be within a couple of ticks from the target.
+ * Historically update_persistent_clock64() has followed x86
+ * semantics, which match the MC146818A/etc RTC. This RTC will store
+ * 'adjust' and then in .5s it will advance one second.
+ *
+ * Architectures are strongly encouraged to use rtclib and not
+ * implement this legacy API.
*/
- if (!ntp_synced()) {
- /*
- * Not synced, exit, do not restart a timer (if one is
- * running, let it run out).
- */
- return;
- }
-
getnstimeofday64(&now);
- if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
- struct timespec64 adjust = now;
-
- fail = -ENODEV;
+ if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
-#ifdef CONFIG_GENERIC_CMOS_UPDATE
- fail = update_persistent_clock64(adjust);
-#endif
-
-#ifdef CONFIG_RTC_SYSTOHC
- if (fail == -ENODEV)
- fail = rtc_set_ntp_time(adjust);
-#endif
+ rc = update_persistent_clock64(adjust);
+ /*
+ * The machine does not support update_persistent_clock64 even
+ * though it defines CONFIG_GENERIC_CMOS_UPDATE.
+ */
+ if (rc == -ENODEV) {
+ no_cmos = true;
+ return false;
+ }
}
- next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
- if (next.tv_nsec <= 0)
- next.tv_nsec += NSEC_PER_SEC;
+ sched_sync_hw_clock(now, target_nsec, rc);
+ return true;
+}
- if (!fail || fail == -ENODEV)
- next.tv_sec = 659;
- else
- next.tv_sec = 0;
+/*
+ * If we have an externally synchronized Linux clock, then update RTC clock
+ * accordingly every ~11 minutes. Generally RTCs can only store second
+ * precision, but many RTCs will adjust the phase of their second tick to
+ * match the moment of update. This infrastructure arranges to call to the RTC
+ * set at the correct moment to phase synchronize the RTC second tick over
+ * with the kernel clock.
+ */
+static void sync_hw_clock(struct work_struct *work)
+{
+ if (!ntp_synced())
+ return;
- if (next.tv_nsec >= NSEC_PER_SEC) {
- next.tv_sec++;
- next.tv_nsec -= NSEC_PER_SEC;
- }
- queue_delayed_work(system_power_efficient_wq,
- &sync_cmos_work, timespec64_to_jiffies(&next));
+ if (sync_cmos_clock())
+ return;
+
+ sync_rtc_clock();
}
void ntp_notify_cmos_timer(void)
{
- queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
-}
-
-#else
-void ntp_notify_cmos_timer(void) { }
-#endif
+ if (!ntp_synced())
+ return;
+ if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) ||
+ IS_ENABLED(CONFIG_RTC_SYSTOHC))
+ queue_delayed_work(system_power_efficient_wq, &sync_work, 0);
+}
/*
* Propagate a new txc->status value into the NTP state:
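
The CMOS branch above only writes the RTC when the current time sits close enough to the half-second target, because the MC146818A-style hardware applies the written value one half second later. The sketch below is a rough, standalone model of that window check; the real decision is made by the rtc_tv_nsec_ok() helper referenced in the hunks above, and the fuzz constant and function names here are illustrative assumptions, not kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000LL
#define SET_FUZZ_NSEC	(10 * NSEC_PER_SEC / 1000)	/* ~10 ms slack, illustrative */

static bool nsec_ok_to_set(long long offset_nsec, const struct timespec *now,
			   time_t *sec_to_set)
{
	/* Shift 'now' by the offset and see if it lands near a second boundary. */
	long long ns = now->tv_nsec + offset_nsec;
	time_t sec = now->tv_sec;

	while (ns < 0) {
		ns += NSEC_PER_SEC;
		sec--;
	}
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		sec++;
	}

	if (ns < SET_FUZZ_NSEC) {
		*sec_to_set = sec;		/* just past the boundary */
		return true;
	}
	if (ns > NSEC_PER_SEC - SET_FUZZ_NSEC) {
		*sec_to_set = sec + 1;		/* just before the boundary */
		return true;
	}
	return false;				/* phase is off: reschedule instead */
}

int main(void)
{
	struct timespec now;
	time_t sec;

	clock_gettime(CLOCK_REALTIME, &now);

	/* The CMOS path aims for tv_nsec == 0.5 s, so the offset is -0.5 s. */
	if (nsec_ok_to_set(-NSEC_PER_SEC / 2, &now, &sec))
		printf("would program the RTC with second %lld\n", (long long)sec);
	else
		printf("phase is off, would reschedule the sync work\n");
	return 0;
}

With now at roughly X.5 s this selects X, which the legacy hardware then advances to X+1 half a second later, i.e. exactly at the next system second boundary described in the comment above.
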
@@ -654,67 +714,6 @@ static inline void process_adjtimex_modes(struct timex *txc,
}
-
-/**
- * ntp_validate_timex - Ensures the timex is ok for use in do_adjtimex
- */
-int ntp_validate_timex(struct timex *txc)
-{
- if (txc->modes & ADJ_ADJTIME) {
- /* singleshot must not be used with any other mode bits */
- if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
- return -EINVAL;
- if (!(txc->modes & ADJ_OFFSET_READONLY) &&
- !capable(CAP_SYS_TIME))
- return -EPERM;
- } else {
- /* In order to modify anything, you gotta be super-user! */
- if (txc->modes && !capable(CAP_SYS_TIME))
- return -EPERM;
- /*
- * if the quartz is off by more than 10% then
- * something is VERY wrong!
- */
- if (txc->modes & ADJ_TICK &&
- (txc->tick < 900000/USER_HZ ||
- txc->tick > 1100000/USER_HZ))
- return -EINVAL;
- }
-
- if (txc->modes & ADJ_SETOFFSET) {
- /* In order to inject time, you gotta be super-user! */
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if (txc->modes & ADJ_NANO) {
- struct timespec ts;
-
- ts.tv_sec = txc->time.tv_sec;
- ts.tv_nsec = txc->time.tv_usec;
- if (!timespec_inject_offset_valid(&ts))
- return -EINVAL;
-
- } else {
- if (!timeval_inject_offset_valid(&txc->time))
- return -EINVAL;
- }
- }
-
- /*
- * Check for potential multiplication overflows that can
- * only happen on 64-bit systems:
- */
- if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
- if (LLONG_MIN / PPM_SCALE > txc->freq)
- return -EINVAL;
- if (LLONG_MAX / PPM_SCALE < txc->freq)
- return -EINVAL;
- }
-
- return 0;
-}
-
-
/*
* adjtimex mainly allows reading (and writing, if superuser) of
* kernel time-keeping variables. used by xntpd.