diff options
author | Damien Zammit <damien@zamaudio.com> | 2025-09-21 09:14:04 +0000 |
---|---|---|
committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2025-09-21 18:46:09 +0200 |
commit | c33f27196d9cb072d48bf5389bb64ad99a0d735a (patch) | |
tree | af845d5d8325378677d1c9583356c3e16d57d1b8 | |
parent | 8d456cd9e417e9787481df15736b5f1c55cbc870 (diff) |
mach_clock: Fix monotonic clock sometimes going backwards
Between reading mtime and reading hpclock_read_counter,
there may be an interrupt that updates mtime, therefore
we need a check to perform the clock read process again
in this case.
TESTED: on UP using:
```
#include <stdio.h>
#include <time.h>
/*
 * Stress-test CLOCK_MONOTONIC for backward transitions: sample the clock
 * in a tight loop and report every pair of consecutive samples where the
 * second one orders strictly before the first.
 *
 * Returns 0 always; the interesting output is the "went backwards" count.
 */
int main(void)
{
	struct timespec ts, now;
	int i;
	int cnt = 0;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	for (i = 0; i < 10000000; i++) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		/*
		 * "Backwards" means the new sample is strictly earlier:
		 * either the seconds field decreased, or the seconds are
		 * equal and the nanoseconds decreased.  (The previous
		 * condition, nsec < && sec <=, missed a pure seconds
		 * decrease when the nanoseconds happened not to drop.)
		 */
		if ((now.tv_sec < ts.tv_sec)
		    || (now.tv_sec == ts.tv_sec && now.tv_nsec < ts.tv_nsec)) {
			printf("BACKWARDS\n");
			cnt++;
			/*
			 * time_t has no portable printf specifier; cast to
			 * long long.  tv_nsec is long, so %09ld.  (%u on a
			 * time_t argument is undefined behavior.)
			 */
			printf(" %lld %09ld\n %lld %09ld\n\n",
			       (long long) ts.tv_sec, (long) ts.tv_nsec,
			       (long long) now.tv_sec, (long) now.tv_nsec);
		}
		ts = now;
	}
	printf("went backwards %d out of %d times\n", cnt, i);
	return 0;
}
```
Before the change, some backward transitions were detected.
After this change, none were detected.
Message-ID: <20250921091345.2183347-1-damien@zamaudio.com>
-rw-r--r-- | kern/mach_clock.c | 20 |
1 files changed, 13 insertions, 7 deletions
diff --git a/kern/mach_clock.c b/kern/mach_clock.c index 3be0fb74..1c2ba0cb 100644 --- a/kern/mach_clock.c +++ b/kern/mach_clock.c @@ -132,24 +132,30 @@ MACRO_END #define read_mapped_time(time) \ MACRO_BEGIN \ + uint32_t _last_hpc; \ do { \ + _last_hpc = last_hpc_read; \ (time)->seconds = mtime->time_value.seconds; \ __sync_synchronize(); \ (time)->nanoseconds = mtime->time_value.nanoseconds; \ __sync_synchronize(); \ - } while ((time)->seconds != mtime->check_seconds64); \ - time_value64_add_hpc(time); \ + } while (((time)->seconds != mtime->check_seconds64) \ + || (_last_hpc != last_hpc_read)); \ + time_value64_add_hpc(time, _last_hpc); \ MACRO_END -#define read_mapped_uptime(uptime) \ +#define read_mapped_uptime(uptime) \ MACRO_BEGIN \ + uint32_t _last_hpc; \ do { \ + _last_hpc = last_hpc_read; \ (uptime)->seconds = mtime->uptime_value.seconds; \ __sync_synchronize(); \ (uptime)->nanoseconds = mtime->uptime_value.nanoseconds;\ __sync_synchronize(); \ - } while ((uptime)->seconds != mtime->check_upseconds64); \ - time_value64_add_hpc(uptime); \ + } while (((uptime)->seconds != mtime->check_upseconds64) \ + || (_last_hpc != last_hpc_read)); \ + time_value64_add_hpc(uptime, _last_hpc); \ MACRO_END def_simple_lock_irq_data(static, timer_lock) /* lock for ... */ @@ -443,11 +449,11 @@ clock_boottime_update(const struct time_value64 *new_time) * Add the time value since last clock interrupt in nanosecond. */ static void -time_value64_add_hpc(time_value64_t *value) +time_value64_add_hpc(time_value64_t *value, uint32_t last_hpc) { uint32_t now = hpclock_read_counter(); /* Time since last clock interrupt in nanosecond. */ - int64_t ns = (now - last_hpc_read) * hpclock_get_counter_period_nsec(); + int64_t ns = (now - last_hpc) * hpclock_get_counter_period_nsec(); /* Limit the value of ns under the period of a clock interrupt. */ if (ns >= tick * 1000) |