author    Ingo Molnar <mingo@kernel.org>  2025-04-09 22:28:48 +0200
committer Ingo Molnar <mingo@kernel.org>  2025-04-10 11:57:40 +0200
commit    dfe2574ce87e031c0c37d49b9bee7e1f3c95bff9 (patch)
tree      e89d187497fc4d5a113f60a964c92c9c5f2bab57
parent    0af2f6be1b4281385b618cb86ad946eded089ac8 (diff)
x86/msr: Standardize on u64 in <asm/msr.h>
There are 9 uses of 'unsigned long long' in <asm/msr.h>, which is really
the same type as 'u64', which is used 34 times. Standardize on u64.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
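For context, a minimal sketch (not part of the patch, names illustrative) of why the substitution is purely cosmetic: via the asm-generic int-ll64.h definitions that x86 uses, u64 is a typedef for unsigned long long, so both spellings name the same 64-bit type and the generated code is unchanged.

	/*
	 * Illustrative sketch only, assuming the int-ll64.h typedefs used
	 * on x86 (u64 == unsigned long long); not part of this patch.
	 */
	#include <linux/types.h>	/* u32, u64 */

	static inline u64 example_widen(u32 low, u32 high)
	{
		/* Same combination the EAX_EDX_VAL() pattern performs. */
		unsigned long long old_spelling = ((u64)high << 32) | low;
		u64 new_spelling = old_spelling;	/* identical type, no conversion */

		return new_spelling;
	}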
-rw-r--r--  arch/x86/include/asm/msr.h  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9397a319d165..8ee6fc633476 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -48,7 +48,7 @@ struct saved_msrs {
#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
#else
-#define DECLARE_ARGS(val, low, high) unsigned long long val
+#define DECLARE_ARGS(val, low, high) u64 val
#define EAX_EDX_VAL(val, low, high) (val)
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
@@ -79,7 +79,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
* think of extending them - you will be slapped with a stinking trout or a frozen
* shark will reach you, wherever you are! You've been warned.
*/
-static __always_inline unsigned long long __rdmsr(unsigned int msr)
+static __always_inline u64 __rdmsr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);
@@ -113,9 +113,9 @@ do { \
__wrmsr((msr), (u32)((u64)(val)), \
(u32)((u64)(val) >> 32))
-static inline unsigned long long native_read_msr(unsigned int msr)
+static inline u64 native_read_msr(unsigned int msr)
{
- unsigned long long val;
+ u64 val;
val = __rdmsr(msr);
@@ -125,7 +125,7 @@ static inline unsigned long long native_read_msr(unsigned int msr)
return val;
}
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
+static inline u64 native_read_msr_safe(unsigned int msr,
int *err)
{
DECLARE_ARGS(val, low, high);
@@ -179,7 +179,7 @@ extern int wrmsr_safe_regs(u32 regs[8]);
* CPU can and will speculatively execute that RDTSC, though, so the
* results can be non-monotonic if compared on different CPUs.
*/
-static __always_inline unsigned long long rdtsc(void)
+static __always_inline u64 rdtsc(void)
{
DECLARE_ARGS(val, low, high);
@@ -196,7 +196,7 @@ static __always_inline unsigned long long rdtsc(void)
* be impossible to observe non-monotonic rdtsc_unordered() behavior
* across multiple CPUs as long as the TSC is synced.
*/
-static __always_inline unsigned long long rdtsc_ordered(void)
+static __always_inline u64 rdtsc_ordered(void)
{
DECLARE_ARGS(val, low, high);
@@ -224,7 +224,7 @@ static __always_inline unsigned long long rdtsc_ordered(void)
return EAX_EDX_VAL(val, low, high);
}
-static inline unsigned long long native_read_pmc(int counter)
+static inline u64 native_read_pmc(int counter)
{
DECLARE_ARGS(val, low, high);
@@ -280,7 +280,7 @@ static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
__err; \
})
-static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
+static inline int rdmsrl_safe(unsigned int msr, u64 *p)
{
int err;
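As a hypothetical caller sketch (not part of the patch), the change is transparent to users of rdmsrl_safe(): the out-parameter is simply declared as u64.

	/*
	 * Hypothetical usage sketch: read an MSR safely into a u64.
	 * MSR_IA32_TSC comes from <asm/msr-index.h>, pulled in via <asm/msr.h>;
	 * the error handling here is illustrative only.
	 */
	#include <linux/printk.h>
	#include <asm/msr.h>

	static void example_read_tsc_msr(void)
	{
		u64 val;

		if (!rdmsrl_safe(MSR_IA32_TSC, &val))
			pr_info("IA32_TSC MSR: 0x%llx\n", val);
	}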