author	Dev Jain <dev.jain@arm.com>	2025-07-18 14:32:39 +0530
committer	Andrew Morton <akpm@linux-foundation.org>	2025-07-24 19:12:40 -0700
commit	1d40f4e3d9d6a2d6807daa22d40ff27fc0c3d0f5 (patch)
tree	05d594f1e0290038b9d3dd299f1ac247af7cf31b
parent	b9bf6c2872c530776852b295eb399a23626e9611 (diff)
mm: optimize mprotect() for MM_CP_PROT_NUMA by batch-skipping PTEs
For the MM_CP_PROT_NUMA skipping case, observe that, if we skip an
iteration due to the underlying folio satisfying any of the skip
conditions, then for all subsequent ptes which map the same folio, the
iteration will be skipped for them too.  Therefore, we can optimize by
using folio_pte_batch() to batch skip the iterations.

Use prot_numa_skip() introduced in the previous patch to determine
whether we need to skip the iteration.  Change its signature to have a
double pointer to a folio, which will be used by
mprotect_folio_pte_batch() to determine the number of iterations we can
safely skip.

Link: https://lkml.kernel.org/r/20250718090244.21092-3-dev.jain@arm.com
Signed-off-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Cc: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	mm/mprotect.c	55
1 file changed, 42 insertions(+), 13 deletions(-)
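For readers who want the control flow outside of diff context, below is a
minimal userspace sketch of the batch-skip idea: because the ptes mapping a
large folio are contiguous in the range being walked, once one of them is
found skippable, the whole run can be stepped over in a single loop
iteration.  This is illustration only, not kernel code; every name in it
(struct entry, folio_id, batch_len, the skip flag) is invented for the
example, with batch_len standing in for what folio_pte_batch() computes in
the patch below.

#include <stdio.h>
#include <stdbool.h>

struct entry {
	int folio_id;	/* which "folio" this pte-like entry maps */
	bool skip;	/* folio-level skip decision, precomputed here */
};

/* Length of the run of consecutive entries mapping the same folio. */
static int batch_len(const struct entry *e, int max)
{
	int n = 1;

	while (n < max && e[n].folio_id == e[0].folio_id)
		n++;
	return n;
}

int main(void)
{
	struct entry range[] = {
		{ 1, true }, { 1, true }, { 1, true },	/* large folio 1: skip */
		{ 2, false },				/* folio 2: process */
		{ 3, true }, { 3, true },		/* large folio 3: skip */
	};
	int total = sizeof(range) / sizeof(range[0]);
	int nr;

	for (int i = 0; i < total; i += nr) {
		nr = 1;
		if (range[i].skip) {
			/* skip the whole batch, not one entry per iteration */
			nr = batch_len(&range[i], total - i);
			printf("skipping %d entries of folio %d\n",
			       nr, range[i].folio_id);
			continue;
		}
		printf("processing entry %d (folio %d)\n", i, range[i].folio_id);
	}
	return 0;
}

The loop advance "i += nr" mirrors the change to the do-while condition at
the end of the patch, where the walk now steps by nr_ptes entries and
nr_ptes * PAGE_SIZE bytes instead of one pte at a time.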
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2a9c73bd07787..97adc62c50ab7 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -83,28 +83,43 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
return pte_dirty(pte);
}
+static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
+ pte_t pte, int max_nr_ptes)
+{
+ /* No underlying folio, so cannot batch */
+ if (!folio)
+ return 1;
+
+ if (!folio_test_large(folio))
+ return 1;
+
+ return folio_pte_batch(folio, ptep, pte, max_nr_ptes);
+}
+
static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
- pte_t oldpte, pte_t *pte, int target_node)
+ pte_t oldpte, pte_t *pte, int target_node,
+ struct folio **foliop)
{
- struct folio *folio;
+ struct folio *folio = NULL;
+ bool ret = true;
bool toptier;
int nid;
/* Avoid TLB flush if possible */
if (pte_protnone(oldpte))
- return true;
+ goto skip;
folio = vm_normal_folio(vma, addr, oldpte);
if (!folio)
- return true;
+ goto skip;
if (folio_is_zone_device(folio) || folio_test_ksm(folio))
- return true;
+ goto skip;
/* Also skip shared copy-on-write pages */
if (is_cow_mapping(vma->vm_flags) &&
(folio_maybe_dma_pinned(folio) || folio_maybe_mapped_shared(folio)))
- return true;
+ goto skip;
/*
* While migration can move some dirty pages,
@@ -112,7 +127,7 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
* context.
*/
if (folio_is_file_lru(folio) && folio_test_dirty(folio))
- return true;
+ goto skip;
/*
* Don't mess with PTEs if page is already on the node
@@ -120,7 +135,7 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
*/
nid = folio_nid(folio);
if (target_node == nid)
- return true;
+ goto skip;
toptier = node_is_toptier(nid);
@@ -129,11 +144,15 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
* balancing is disabled
*/
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
- return true;
+ goto skip;
+ ret = false;
if (folio_use_access_time(folio))
folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
- return false;
+
+skip:
+ *foliop = folio;
+ return ret;
}
static long change_pte_range(struct mmu_gather *tlb,
@@ -147,6 +166,7 @@ static long change_pte_range(struct mmu_gather *tlb,
bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+ int nr_ptes;
tlb_change_page_size(tlb, PAGE_SIZE);
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -161,8 +181,11 @@ static long change_pte_range(struct mmu_gather *tlb,
flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();
do {
+ nr_ptes = 1;
oldpte = ptep_get(pte);
if (pte_present(oldpte)) {
+ int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
+ struct folio *folio;
pte_t ptent;
/*
@@ -170,9 +193,15 @@ static long change_pte_range(struct mmu_gather *tlb,
* pages. See similar comment in change_huge_pmd.
*/
if (prot_numa) {
- if (prot_numa_skip(vma, addr, oldpte, pte,
- target_node))
+ int ret = prot_numa_skip(vma, addr, oldpte, pte,
+ target_node, &folio);
+ if (ret) {
+
+ /* determine batch to skip */
+ nr_ptes = mprotect_folio_pte_batch(folio,
+ pte, oldpte, max_nr_ptes);
continue;
+ }
}
oldpte = ptep_modify_prot_start(vma, addr, pte);
@@ -289,7 +318,7 @@ static long change_pte_range(struct mmu_gather *tlb,
pages++;
}
}
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);