author     Sourabh Jain <sourabhjain@linux.ibm.com>          2024-12-17 13:16:40 +0530
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2025-02-08 09:56:49 +0100
commit     52b76423a4e23800488361694ebb510fbec77945 (patch)
tree       813b1337b767bc1e20f0979f01d32e99d1f6d8b9
parent     4338831b831c76c9511b1f3aef64510b292da444 (diff)
powerpc/book3s64/hugetlb: Fix disabling hugetlb when fadump is active
[ Upstream commit d629d7a8efc33d05d62f4805c0ffb44727e3d99f ]

Commit 8597538712eb ("powerpc/fadump: Do not use hugepages when fadump
is active") disabled hugetlb support when fadump is active by returning
early from hugetlbpage_init():arch/powerpc/mm/hugetlbpage.c and not
populating hpage_shift/HPAGE_SHIFT.

Later, commit 2354ad252b66 ("powerpc/mm: Update default hugetlb size
early") moved the allocation of hpage_shift/HPAGE_SHIFT to early boot,
which inadvertently re-enabled hugetlb support when fadump is active.

Fix this by implementing hugepages_supported() on powerpc. This ensures
that disabling hugetlb for the fadump kernel is independent of
hpage_shift/HPAGE_SHIFT.

Fixes: 2354ad252b66 ("powerpc/mm: Update default hugetlb size early")
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Sourabh Jain <sourabhjain@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20241217074640.1064510-1-sourabhjain@linux.ibm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
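For context, the reason an arch-level hugepages_supported() override is enough to keep hugetlb off is that the generic hugetlb setup bails out early when it returns false. A simplified sketch of that consumer-side check (paraphrasing mm/hugetlb.c, not the exact upstream code):

/* Sketch (not verbatim) of the generic consumer in mm/hugetlb.c:
 * hugetlb_init() returns early when hugepages_supported() is false,
 * so no hstates are registered and no huge pages can be reserved.
 */
static int __init hugetlb_init(void)
{
	if (!hugepages_supported())
		return 0;	/* hugetlb stays disabled for this boot */

	/* ... normal hstate / sysfs setup continues here ... */
	return 0;
}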
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h  9
1 file changed, 9 insertions(+), 0 deletions(-)
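The trailing "#define hugepages_supported hugepages_supported" in the hunk below follows the usual kernel pattern for overriding a generic fallback: include/linux/hugetlb.h only supplies its default when the name is not already defined. A simplified sketch of the assumed shape of that fallback (not the exact upstream wording):

/* Simplified sketch of the generic fallback in include/linux/hugetlb.h;
 * it is used only when an architecture has not defined
 * hugepages_supported itself.
 */
#ifndef hugepages_supported
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

With the powerpc override in place, the check also honours hugetlb_disabled, so fadump can keep hugetlb off even though HPAGE_SHIFT is now populated in early boot.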
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 18a3028ac3b6..dad2e7980f24 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -15,6 +15,15 @@
extern bool hugetlb_disabled;
+static inline bool hugepages_supported(void)
+{
+	if (hugetlb_disabled)
+		return false;
+
+	return HPAGE_SHIFT != 0;
+}
+#define hugepages_supported hugepages_supported
+
void __init hugetlbpage_init_defaultsize(void);
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,