From 6d91c78f0c240e3c7d81e19e85507e0aec580d6f Mon Sep 17 00:00:00 2001
From: Zhaoming Luo
Date: Mon, 24 Mar 2025 12:25:51 +0800
Subject: Integrate HPET so the functions used for getting time have higher
 accuracy

Integrate HPET so that host_get_time, host_get_time64, and
host_get_uptime64 are more precise.  With this patch applied, the
precision can be as fine as 10 ns.

* i386/i386/apic.c: Implement, for i386, the two high-precision clock
  interface functions added in this patch.
* i386/i386at/model_dep.c: Initialize HPET if APIC is defined.
* kern/mach_clock.c: Use the high-precision clock to provide time
  values with 10 ns precision.
* kern/mach_clock.h: Add two new interface functions for accessing the
  high-precision clock.

Message-ID: <20250324042551.4752-2-zhmingluo@163.com>
---
 kern/mach_clock.c | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

(limited to 'kern/mach_clock.c')

diff --git a/kern/mach_clock.c b/kern/mach_clock.c
index 5501b7b8..48f673a4 100644
--- a/kern/mach_clock.c
+++ b/kern/mach_clock.c
@@ -83,6 +83,15 @@
 unsigned	tickadj = 500 / HZ;	/* can adjust 100 usecs per second */
 unsigned	bigadj = 1000000;	/* adjust 10*tickadj if adjustment > bigadj */
 
+/* A high-precision (hardware) clock is used to increase the accuracy of the
+ * functions used for getting time (e.g. host_get_time64()).  The counter of
+ * the clock is read once on every clock interrupt.  When one of the functions
+ * used for getting time is called, the counter is read again and the
+ * difference between the two reads, multiplied by the counter period, is
+ * added to the value read from time or uptime to yield a more accurate
+ * time. */
+uint32_t last_hpc_read = 0;
+
 /*
  *	This update protocol, with a check value, allows
  *		do {
@@ -128,7 +137,8 @@ MACRO_BEGIN					\
 		__sync_synchronize();				\
 		(time)->nanoseconds = mtime->time_value.nanoseconds;	\
 		__sync_synchronize();				\
-	} while ((time)->seconds != mtime->check_seconds64);	\
+	} while ((time)->seconds != mtime->check_seconds64);	\
+	time_value64_add_hpc(time);				\
 MACRO_END
 
 #define read_mapped_uptime(uptime)				\
@@ -139,6 +149,7 @@ MACRO_BEGIN					\
 		(uptime)->nanoseconds = mtime->uptime_value.nanoseconds;\
 		__sync_synchronize();				\
 	} while ((uptime)->seconds != mtime->check_upseconds64);	\
+	time_value64_add_hpc(uptime);				\
 MACRO_END
 
 def_simple_lock_irq_data(static,	timer_lock)	/* lock for ... */
@@ -292,6 +303,7 @@ void clock_interrupt(
 			}
 		}
 	}
+	last_hpc_read = hpclock_read_counter();
 }
 
 /*
@@ -426,6 +438,26 @@ clock_boottime_update(const struct time_value64 *new_time)
 	time_value64_add(&clock_boottime_offset, &delta);
 }
 
+/*
+ * Add the time elapsed since the last clock interrupt, in nanoseconds.
+ */
+static void
+time_value64_add_hpc(time_value64_t *value)
+{
+	uint32_t now = hpclock_read_counter();
+	/* Time since the last clock interrupt, in nanoseconds. */
+	int64_t ns = (now - last_hpc_read) * hpclock_get_counter_period_nsec();
+
+	/* Keep ns below the period of a clock interrupt. */
+	if (ns >= tick * 1000)
+		/* Pin ns to the end of the clock interrupt period if
+		   something goes wrong. */
+		ns = (tick * 1000) - 1;
+
+	time_value64_add_nanos(value, ns);
+}
+
+
 /*
  *	Record a timestamp in STAMP.  Records values in the boot-time clock
  *	frame.
-- 
cgit v1.2.3
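
For readers who want to see the arithmetic in isolation, below is a minimal,
self-contained user-space sketch of the sub-tick correction the patch adds.
The tick length, the HPET counter period, and the two stub accessors are
assumed values for illustration only; apart from last_hpc_read and the
accessor names hpclock_read_counter()/hpclock_get_counter_period_nsec(),
nothing here is taken from the real kernel code.

/*
 * Sketch of the correction performed by time_value64_add_hpc(), with the
 * HPET accessors stubbed out.  Assumed values: 10 ms ticks, 10 ns HPET
 * counter period (i.e. a 100 MHz counter).
 */
#include <stdint.h>
#include <stdio.h>

#define TICK_USEC        10000	/* assumed: one clock interrupt every 10 ms */
#define HPET_PERIOD_NSEC 10	/* assumed: 10 ns per counter increment */

/* Stand-ins for the HPET accessors used by the patch. */
static uint32_t fake_counter;
static uint32_t hpclock_read_counter(void) { return fake_counter; }
static uint32_t hpclock_get_counter_period_nsec(void) { return HPET_PERIOD_NSEC; }

/* Counter value latched on every clock interrupt, as in clock_interrupt(). */
static uint32_t last_hpc_read;

/* Nanoseconds elapsed since the last clock interrupt, clamped to one tick.
 * The uint32_t subtraction wraps correctly if the 32-bit counter rolls over. */
static int64_t
sub_tick_nanos(void)
{
	int64_t ns = (int64_t)(uint32_t)(hpclock_read_counter() - last_hpc_read)
		     * hpclock_get_counter_period_nsec();

	if (ns >= (int64_t)TICK_USEC * 1000)
		ns = (int64_t)TICK_USEC * 1000 - 1;	/* never report a full tick */
	return ns;
}

int
main(void)
{
	last_hpc_read = 100;	/* latched by the last clock interrupt */
	fake_counter = 350;	/* value seen when time is queried */

	/* (350 - 100) counts * 10 ns = 2500 ns added on top of the mapped time. */
	printf("sub-tick offset: %lld ns\n", (long long)sub_tick_nanos());
	return 0;
}

The clamp mirrors the patch's guard: if the computed offset ever reaches a
full tick (for instance because a clock interrupt was delayed), it is pinned
just below one tick, so the correction can never push the reported time past
the update that the next interrupt will perform.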