author	David Hildenbrand <david@redhat.com>	2024-06-07 14:23:53 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-08-03 08:54:09 +0200
commit	cdeba6d1cfb100c58162eb0e9634ad8fbf5ae24b (patch)
tree	8405a3aea209c3acc50b06985a38e1dd83e16e55 /fs/proc/task_mmu.c
parent	92e8bd49ab48688e52704403c52f1dace4b4743a (diff)
fs/proc/task_mmu: don't indicate PM_MMAP_EXCLUSIVE without PM_PRESENT
[ Upstream commit da7f31ed0f4df8f61e8195e527aa83dd54896ba3 ]

Relying on the mapcount for non-present PTEs that reference pages doesn't
make any sense: they are not accounted in the mapcount, so
page_mapcount() == 1 won't return the result we actually want to know.

While we don't check the mapcount for migration entries already, we could
end up checking it for swap, hwpoison, device exclusive, ... entries,
which we really shouldn't.

There is one exception: device private entries, which we consider
fake-present (e.g., incremented the mapcount). But we won't care about
that for now for PM_MMAP_EXCLUSIVE, because indicating PM_SWAP for them
although they are fake-present already sounds suspiciously wrong.

Let's never indicate PM_MMAP_EXCLUSIVE without PM_PRESENT.

Link: https://lkml.kernel.org/r/20240607122357.115423-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: 2c1f057e5be6 ("fs/proc/task_mmu: properly detect PM_MMAP_EXCLUSIVE per page of PMD-mapped THPs")
Signed-off-by: Sasha Levin <sashal@kernel.org>
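For reference, these flags reach userspace through /proc/<pid>/pagemap, where (per Documentation/admin-guide/mm/pagemap.rst) bit 63 is "page present" (PM_PRESENT), bit 62 is "page swapped" (PM_SWAP) and bit 56 is "page exclusively mapped" (PM_MMAP_EXCLUSIVE). The following is a rough, illustrative userspace sketch, not part of this patch, that reads one pagemap entry and decodes those bits; after this change the exclusive bit can only appear together with the present bit:

/* Hypothetical example reader, not part of this patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t entry = 0;
	long page_size = sysconf(_SC_PAGESIZE);
	volatile char probe = 42;		/* touch a stack page so it is present */
	uintptr_t vaddr = (uintptr_t)&probe;
	off_t offset = (off_t)(vaddr / page_size) * sizeof(uint64_t);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &entry, sizeof(entry), offset) != (ssize_t)sizeof(entry)) {
		close(fd);
		return 1;
	}
	close(fd);

	/* Bit numbers from Documentation/admin-guide/mm/pagemap.rst. */
	int present   = !!(entry & (1ULL << 63));	/* PM_PRESENT        */
	int swapped   = !!(entry & (1ULL << 62));	/* PM_SWAP           */
	int exclusive = !!(entry & (1ULL << 56));	/* PM_MMAP_EXCLUSIVE */

	/* After this patch, exclusive == 1 implies present == 1. */
	printf("present=%d swapped=%d exclusive=%d\n",
	       present, swapped, exclusive);
	return 0;
}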
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	8
1 file changed, 2 insertions, 6 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e327d1c77de88..a5a946bef0ac4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1411,7 +1411,6 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 {
 	u64 frame = 0, flags = 0;
 	struct page *page = NULL;
-	bool migration = false;
 
 	if (pte_present(pte)) {
 		if (pm->show_pfn)
@@ -1443,7 +1442,6 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 				(offset << MAX_SWAPFILES_SHIFT);
 		}
 		flags |= PM_SWAP;
-		migration = is_migration_entry(entry);
 		if (is_pfn_swap_entry(entry))
 			page = pfn_swap_entry_to_page(entry);
 		if (pte_marker_entry_uffd_wp(entry))
@@ -1452,7 +1450,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 
 	if (page && !PageAnon(page))
 		flags |= PM_FILE;
-	if (page && !migration && page_mapcount(page) == 1)
+	if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1)
 		flags |= PM_MMAP_EXCLUSIVE;
 	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags |= PM_SOFT_DIRTY;
@@ -1469,7 +1467,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	pte_t *pte, *orig_pte;
 	int err = 0;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	bool migration = false;
 
 	ptl = pmd_trans_huge_lock(pmdp, vma);
 	if (ptl) {
@@ -1513,14 +1510,13 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		if (pmd_swp_uffd_wp(pmd))
 			flags |= PM_UFFD_WP;
 		VM_BUG_ON(!is_pmd_migration_entry(pmd));
-		migration = is_migration_entry(entry);
 		page = pfn_swap_entry_to_page(entry);
 	}
 #endif
 
 	if (page && !PageAnon(page))
 		flags |= PM_FILE;
-	if (page && !migration && page_mapcount(page) == 1)
+	if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1)
 		flags |= PM_MMAP_EXCLUSIVE;
 
 	for (; addr != end; addr += PAGE_SIZE) {