author	Kefeng Wang <wangkefeng.wang@huawei.com>	2024-10-28 22:56:56 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-12-27 14:02:20 +0100
commit	cb12d61361ce769672c7c7bd32107252598cdd8b (patch)
tree	57f9160b87a4ab2826ba159be93e7d6fc0c50995
parent	b79b6fe0737f233f0be1465052b7f0e75f324735 (diff)
mm: use aligned address in copy_user_gigantic_page()
commit f5d09de9f1bf9674c6418ff10d0a40cfe29268e1 upstream.

In the current kernel, hugetlb_wp() calls copy_user_large_folio() with the
fault address, which may not be aligned to the huge page size.
copy_user_large_folio() may then call copy_user_gigantic_page() with that
address, even though copy_user_gigantic_page() requires the address to be
huge-page-size aligned. This can cause memory corruption or an information
leak. Additionally, use the more descriptive name 'addr_hint' instead of
'addr' in copy_user_gigantic_page().

Link: https://lkml.kernel.org/r/20241028145656.932941-2-wangkefeng.wang@huawei.com
Fixes: 530dd9926dc1 ("mm: memory: improve copy_user_large_folio()")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
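To make the alignment problem concrete, here is a minimal user-space sketch;
it is not kernel code, and the 2 MiB huge page size and the addresses are
illustrative assumptions. ALIGN_DOWN mirrors the kernel macro of the same
name:

/* align_sketch.c: user-space illustration, not kernel code */
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long hpage_size = 2UL << 20;	/* assumed 2 MiB huge page */
	unsigned long page_size = 4096UL;
	unsigned long addr_hint = 0x20001000UL;	/* fault address inside the huge page */
	unsigned long addr = ALIGN_DOWN(addr_hint, hpage_size);

	/*
	 * A per-subpage copy loop computes subpage i's address as
	 * base + i * page_size; starting from the unaligned hint would
	 * copy the wrong subpages and run past the end of the folio.
	 */
	printf("hint      = %#lx\n", addr_hint);		/* 0x20001000 */
	printf("base      = %#lx\n", addr);			/* 0x20000000 */
	printf("subpage 3 = %#lx\n", addr + 3 * page_size);	/* 0x20003000 */
	return 0;
}

The patch moves exactly this ALIGN_DOWN step into copy_user_gigantic_page()
itself, so every caller can pass the raw fault address safely.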
-rw-r--r--	mm/hugetlb.c	5
-rw-r--r--	mm/memory.c	5
2 files changed, 5 insertions, 5 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 190fa05635f4..5dc57b74a8fe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5333,7 +5333,7 @@ again:
 				break;
 			}
 			ret = copy_user_large_folio(new_folio, pte_folio,
-						    ALIGN_DOWN(addr, sz), dst_vma);
+						    addr, dst_vma);
 			folio_put(pte_folio);
 			if (ret) {
 				folio_put(new_folio);
@@ -6632,8 +6632,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		ret = copy_user_large_folio(folio, *foliop,
-					    ALIGN_DOWN(dst_addr, size), dst_vma);
+		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 		if (ret) {
diff --git a/mm/memory.c b/mm/memory.c
index 3f20556e66fb..d322ddfe6791 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6817,13 +6817,14 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
-				   unsigned long addr,
+				   unsigned long addr_hint,
 				   struct vm_area_struct *vma,
 				   unsigned int nr_pages)
 {
-	int i;
+	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
 	struct page *dst_page;
 	struct page *src_page;
+	int i;
 
 	for (i = 0; i < nr_pages; i++) {
 		dst_page = folio_page(dst, i);
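For reference, a hypothetical user-space model of the patched contract; the
kernel's folio, page, and vma machinery is reduced to flat buffers, and all
names and sizes here are illustrative assumptions, not the kernel API. It
demonstrates the point of the change: callers now pass the raw fault address,
and the gigantic-page copy helper derives the aligned base itself:

/* helper_model.c: hypothetical model of the patched helper, not kernel code */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define PAGE_SIZE	4096UL
#define FOLIO_SIZE	(2UL << 20)	/* assumed 2 MiB folio */

static char src_folio[FOLIO_SIZE], dst_folio[FOLIO_SIZE];

/*
 * Post-patch contract: take the raw fault address (addr_hint) and derive
 * the folio-aligned base internally, as the kernel helper now does with
 * ALIGN_DOWN(addr_hint, folio_size(dst)), then copy one subpage at a time.
 */
static void copy_gigantic(unsigned long addr_hint)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, FOLIO_SIZE);
	unsigned long i, nr_pages = FOLIO_SIZE / PAGE_SIZE;

	for (i = 0; i < nr_pages; i++) {
		/* each subpage sits at the aligned base plus i * PAGE_SIZE */
		unsigned long subpage_addr = addr + i * PAGE_SIZE;

		assert(subpage_addr % PAGE_SIZE == 0);
		memcpy(dst_folio + i * PAGE_SIZE,
		       src_folio + i * PAGE_SIZE, PAGE_SIZE);
	}
}

int main(void)
{
	memset(src_folio, 0x5a, sizeof(src_folio));
	/* callers may now pass any address inside the folio */
	copy_gigantic((2UL << 20) + 0x1234);
	printf("%s\n", memcmp(src_folio, dst_folio, FOLIO_SIZE) ? "FAIL" : "OK");
	return 0;
}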