author     Dev Jain <dev.jain@arm.com>    2025-04-03 10:58:44 +0530
committer  Will Deacon <will@kernel.org>  2025-04-29 16:26:34 +0100
commit     fcf8dda8cc4860076395d807d9b3f781ca1d8f4f (patch)
tree       87b47b1a940679cb085b5c0805ceb0c30f98f83d
parent     f101c56447717c595d803894ba0e215f56c6fba4 (diff)
arm64: pageattr: Explicitly bail out when changing permissions for vmalloc_huge mappings
arm64 uses apply_to_page_range() to change permissions for kernel vmalloc mappings, which does not support changing permissions for block mappings. This function will change permissions until it encounters a block mapping, and will then bail out with a warning. Since there are no reports of this triggering, it implies that there are currently no cases of code doing a vmalloc_huge() followed by a partial permission change. But this is a footgun waiting to go off, so let's detect it early and avoid leaving permissions in an intermediate state: explicitly disallow changing permissions for VM_ALLOW_HUGE_VMAP mappings.

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Dev Jain <dev.jain@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20250403052844.61818-1-dev.jain@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
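For context, here is a minimal sketch (not part of the patch) of the kind of call sequence the commit guards against. The function name, buffer size and error handling are illustrative assumptions; vmalloc_huge(), set_memory_ro() and vfree() are the kernel interfaces the commit message refers to.

#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/sizes.h>
#include <linux/gfp.h>

/*
 * Illustrative only: allocate with vmalloc_huge() and then try to change
 * permissions on part of the range. Before this patch, apply_to_page_range()
 * would update leaf entries until it hit a block mapping and then bail out
 * with a warning, leaving the range in an intermediate state. With this
 * patch, change_memory_common() rejects the request up front with -EINVAL.
 */
static int example_partial_ro(void)
{
	void *buf;
	int ret;

	/* May be backed by block mappings (VM_ALLOW_HUGE_VMAP is set). */
	buf = vmalloc_huge(SZ_16M, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Try to make only the first four pages read-only. */
	ret = set_memory_ro((unsigned long)buf, 4);

	vfree(buf);
	return ret;
}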
-rw-r--r--  arch/arm64/mm/pageattr.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 39fd1f7ff02a..04d4a8f676db 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -96,8 +96,8 @@ static int change_memory_common(unsigned long addr, int numpages,
* we are operating on does not result in such splitting.
*
* Let's restrict ourselves to mappings created by vmalloc (or vmap).
- * Those are guaranteed to consist entirely of page mappings, and
- * splitting is never needed.
+ * Disallow VM_ALLOW_HUGE_VMAP mappings to guarantee that only page
+ * mappings are updated and splitting is never needed.
*
* So check whether the [addr, addr + size) interval is entirely
* covered by precisely one VM area that has the VM_ALLOC flag set.
@@ -105,7 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
area = find_vm_area((void *)addr);
if (!area ||
end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
- !(area->flags & VM_ALLOC))
+ ((area->flags & (VM_ALLOC | VM_ALLOW_HUGE_VMAP)) != VM_ALLOC))
return -EINVAL;
if (!numpages)
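For reference (not part of the patch), the new condition can be read as "allow only VM areas that have VM_ALLOC set and VM_ALLOW_HUGE_VMAP clear". A hypothetical helper spelling out the mask logic, assuming the standard vmalloc flag definitions:

#include <linux/vmalloc.h>

/* Hypothetical helper mirroring the check added in change_memory_common(). */
static bool perm_change_allowed(const struct vm_struct *area)
{
	/*
	 * Masking with both flags and comparing against VM_ALLOC means:
	 *  - VM_ALLOC clear           -> mask != VM_ALLOC -> rejected
	 *  - VM_ALLOW_HUGE_VMAP set   -> mask != VM_ALLOC -> rejected
	 *  - VM_ALLOC set, huge clear -> mask == VM_ALLOC -> allowed
	 */
	return (area->flags & (VM_ALLOC | VM_ALLOW_HUGE_VMAP)) == VM_ALLOC;
}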