diff options
-rw-r--r-- | mm/huge_memory.c | 19 |
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d21c9ef0943c..3877483a20fd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1235,6 +1235,18 @@ out_unlock:
 	return ret;
 }
 
+/*
+ * foll_force can write to even unwritable pmd's, but only
+ * after we've gone through a cow cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+					unsigned int flags)
+{
+	return pmd_write(pmd) ||
+	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+		page && PageAnon(page));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 				   unsigned long addr,
 				   pmd_t *pmd,
@@ -1245,15 +1257,16 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	assert_spin_locked(&mm->page_table_lock);
 
-	if (flags & FOLL_WRITE && !pmd_write(*pmd))
-		goto out;
-
 	/* Avoid dumping huge zero page */
 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
 		return ERR_PTR(-EFAULT);
 
 	page = pmd_page(*pmd);
 	VM_BUG_ON(!PageHead(page));
+
+	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, page, flags))
+		return NULL;
+
 	if (flags & FOLL_TOUCH) {
 		pmd_t _pmd;
 		/*