Diffstat (limited to 'mm')
-rw-r--r--   mm/compaction.c     |  12
-rw-r--r--   mm/filemap.c        |  26
-rw-r--r--   mm/gup.c            |  14
-rw-r--r--   mm/huge_memory.c    |   9
-rw-r--r--   mm/hugetlb.c        | 110
-rw-r--r--   mm/madvise.c        |   4
-rw-r--r--   mm/memcontrol.c     |  34
-rw-r--r--   mm/memory-failure.c |  36
-rw-r--r--   mm/memory_hotplug.c |  18
-rw-r--r--   mm/mempolicy.c      |   6
-rw-r--r--   mm/memremap.c       |  39
-rw-r--r--   mm/migrate.c        |  44
-rw-r--r--   mm/page-writeback.c |   6
-rw-r--r--   mm/page_alloc.c     |   5
-rw-r--r--   mm/percpu.c         |   8
-rw-r--r--   mm/rmap.c           |   5
-rw-r--r--   mm/slub.c           |   2
-rw-r--r--   mm/truncate.c       |   2
-rw-r--r--   mm/vmscan.c         |   5
19 files changed, 161 insertions(+), 224 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 6e0ee5641788..13cb7a961b31 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -817,6 +817,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	 * delay for some time until fewer pages are isolated
 	 */
 	while (unlikely(too_many_isolated(pgdat))) {
+		/* stop isolation if there are still pages not migrated */
+		if (cc->nr_migratepages)
+			return 0;
+
 		/* async migration should just abort */
 		if (cc->mode == MIGRATE_ASYNC)
 			return 0;
@@ -1012,8 +1016,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 isolate_success:
 		list_add(&page->lru, &cc->migratepages);
-		cc->nr_migratepages++;
-		nr_isolated++;
+		cc->nr_migratepages += compound_nr(page);
+		nr_isolated += compound_nr(page);
 
 		/*
 		 * Avoid isolating too much unless this block is being
@@ -1021,7 +1025,7 @@ isolate_success:
 		 * or a lock is contended. For contention, isolate quickly to
 		 * potentially remove one source of contention.
 		 */
-		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
+		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
 		    !cc->rescan && !cc->contended) {
 			++low_pfn;
 			break;
@@ -1132,7 +1136,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 		if (!pfn)
 			break;
 
-		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
 			break;
 	}
diff --git a/mm/filemap.c b/mm/filemap.c
index d5e7c2029d16..0b2067b3c328 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1484,11 +1484,19 @@ void end_page_writeback(struct page *page)
 		rotate_reclaimable_page(page);
 	}
 
+	/*
+	 * Writeback does not hold a page reference of its own, relying
+	 * on truncation to wait for the clearing of PG_writeback.
+	 * But here we must make sure that the page is not freed and
+	 * reused before the wake_up_page().
+	 */
+	get_page(page);
 	if (!test_clear_page_writeback(page))
 		BUG();
 
 	smp_mb__after_atomic();
 	wake_up_page(page, PG_writeback);
+	put_page(page);
 }
 EXPORT_SYMBOL(end_page_writeback);
@@ -2347,10 +2355,15 @@ page_ok:
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		if (iocb->ki_flags & IOCB_WAITQ)
+		if (iocb->ki_flags & IOCB_WAITQ) {
+			if (written) {
+				put_page(page);
+				goto out;
+			}
 			error = lock_page_async(page, iocb->ki_waitq);
-		else
+		} else {
 			error = lock_page_killable(page);
+		}
 		if (unlikely(error))
 			goto readpage_error;
@@ -2393,10 +2406,15 @@ readpage:
 		}
 
 		if (!PageUptodate(page)) {
-			if (iocb->ki_flags & IOCB_WAITQ)
+			if (iocb->ki_flags & IOCB_WAITQ) {
+				if (written) {
+					put_page(page);
+					goto out;
+				}
 				error = lock_page_async(page, iocb->ki_waitq);
-			else
+			} else {
 				error = lock_page_killable(page);
+			}
 
 			if (unlikely(error))
 				goto readpage_error;
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1647,8 +1647,11 @@ check_again:
 		/*
 		 * drop the above get_user_pages reference.
 		 */
-		for (i = 0; i < nr_pages; i++)
-			put_page(pages[i]);
+		if (gup_flags & FOLL_PIN)
+			unpin_user_pages(pages, nr_pages);
+		else
+			for (i = 0; i < nr_pages; i++)
+				put_page(pages[i]);
 
 		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
@@ -1728,8 +1731,11 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 			goto out;
 
 		if (check_dax_vmas(vmas_tmp, rc)) {
-			for (i = 0; i < rc; i++)
-				put_page(pages[i]);
+			if (gup_flags & FOLL_PIN)
+				unpin_user_pages(pages, rc);
+			else
+				for (i = 0; i < rc; i++)
+					put_page(pages[i]);
 			rc = -EOPNOTSUPP;
 			goto out;
 		}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9474dbc150ed..ec2bb93f7431 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -710,7 +710,6 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
 		struct page *zero_page;
-		bool set;
 		vm_fault_t ret;
 		pgtable = pte_alloc_one(vma->vm_mm);
 		if (unlikely(!pgtable))
@@ -723,25 +722,25 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		}
 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 		ret = 0;
-		set = false;
 		if (pmd_none(*vmf->pmd)) {
 			ret = check_stable_address_space(vma->vm_mm);
 			if (ret) {
 				spin_unlock(vmf->ptl);
+				pte_free(vma->vm_mm, pgtable);
 			} else if (userfaultfd_missing(vma)) {
 				spin_unlock(vmf->ptl);
+				pte_free(vma->vm_mm, pgtable);
 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 			} else {
 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
 						   haddr, vmf->pmd, zero_page);
 				spin_unlock(vmf->ptl);
-				set = true;
 			}
-		} else
+		} else {
 			spin_unlock(vmf->ptl);
-		if (!set)
 			pte_free(vma->vm_mm, pgtable);
+		}
 		return ret;
 	}
 	gfp = alloc_hugepage_direct_gfpmask(vma);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fe76f8fd5a73..37f15c3c24dc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -648,6 +648,8 @@ retry:
 			}
 
 			del += t - f;
+			hugetlb_cgroup_uncharge_file_region(
+				resv, rg, t - f);
 
 			/* New entry for end of split region */
 			nrg->from = t;
@@ -660,9 +662,6 @@ retry:
 			/* Original entry is trimmed */
 			rg->to = f;
 
-			hugetlb_cgroup_uncharge_file_region(
-				resv, rg, nrg->to - nrg->from);
-
 			list_add(&nrg->link, &rg->link);
 			nrg = NULL;
 			break;
@@ -678,17 +677,17 @@ retry:
 		}
 
 		if (f <= rg->from) {	/* Trim beginning of region */
-			del += t - rg->from;
-			rg->from = t;
-
 			hugetlb_cgroup_uncharge_file_region(resv, rg,
 							    t - rg->from);
-		} else {		/* Trim end of region */
-			del += rg->to - f;
-			rg->to = f;
+			del += t - rg->from;
+			rg->from = t;
+		} else {		/* Trim end of region */
 			hugetlb_cgroup_uncharge_file_region(resv, rg,
 							    rg->to - f);
+
+			del += rg->to - f;
+			rg->to = f;
 		}
 	}
@@ -1569,103 +1568,23 @@ int PageHeadHuge(struct page *page_head)
 }
 
 /*
- * Find address_space associated with hugetlbfs page.
- * Upon entry page is locked and page 'was' mapped although mapped state
- * could change.  If necessary, use anon_vma to find vma and associated
- * address space.  The returned mapping may be stale, but it can not be
- * invalid as page lock (which is held) is required to destroy mapping.
- */
-static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
-{
-	struct anon_vma *anon_vma;
-	pgoff_t pgoff_start, pgoff_end;
-	struct anon_vma_chain *avc;
-	struct address_space *mapping = page_mapping(hpage);
-
-	/* Simple file based mapping */
-	if (mapping)
-		return mapping;
-
-	/*
-	 * Even anonymous hugetlbfs mappings are associated with an
-	 * underlying hugetlbfs file (see hugetlb_file_setup in mmap
-	 * code).  Find a vma associated with the anonymous vma, and
-	 * use the file pointer to get address_space.
-	 */
-	anon_vma = page_lock_anon_vma_read(hpage);
-	if (!anon_vma)
-		return mapping;  /* NULL */
-
-	/* Use first found vma */
-	pgoff_start = page_to_pgoff(hpage);
-	pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
-	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
-					pgoff_start, pgoff_end) {
-		struct vm_area_struct *vma = avc->vma;
-
-		mapping = vma->vm_file->f_mapping;
-		break;
-	}
-
-	anon_vma_unlock_read(anon_vma);
-	return mapping;
-}
-
-/*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which allows us to find the mapping
- * even in the case of an anon page.  However, locking order dictates
- * the i_mmap_rwsem be acquired BEFORE the page lock.  This is hugetlbfs
- * specific.  So, we first try to lock the sema while still holding the
- * page lock.  If this works, great!  If not, then we need to drop the
- * page lock and then acquire i_mmap_rwsem and reacquire page lock.  Of
- * course, need to revalidate state along the way.
+ * Upon entry, the page is locked which means that page_mapping() is
+ * stable.  Due to locking order, we can only trylock_write.  If we can
+ * not get the lock, simply return NULL to caller.
  */
 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
 {
-	struct address_space *mapping, *mapping2;
+	struct address_space *mapping = page_mapping(hpage);
 
-	mapping = _get_hugetlb_page_mapping(hpage);
-retry:
 	if (!mapping)
 		return mapping;
 
-	/*
-	 * If no contention, take lock and return
-	 */
 	if (i_mmap_trylock_write(mapping))
 		return mapping;
 
-	/*
-	 * Must drop page lock and wait on mapping sema.
-	 * Note:  Once page lock is dropped, mapping could become invalid.
-	 * As a hack, increase map count until we lock page again.
-	 */
-	atomic_inc(&hpage->_mapcount);
-	unlock_page(hpage);
-	i_mmap_lock_write(mapping);
-	lock_page(hpage);
-	atomic_add_negative(-1, &hpage->_mapcount);
-
-	/* verify page is still mapped */
-	if (!page_mapped(hpage)) {
-		i_mmap_unlock_write(mapping);
-		return NULL;
-	}
-
-	/*
-	 * Get address space again and verify it is the same one
-	 * we locked.  If not, drop lock and retry.
-	 */
-	mapping2 = _get_hugetlb_page_mapping(hpage);
-	if (mapping2 != mapping) {
-		i_mmap_unlock_write(mapping);
-		mapping = mapping2;
-		goto retry;
-	}
-
-	return mapping;
+	return NULL;
 }
 
 pgoff_t __basepage_index(struct page *page)
@@ -2443,6 +2362,9 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
 		hugetlb_acct_memory(h, -rsv_adjust);
+		if (deferred_reserve)
+			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
+					pages_per_huge_page(h), page);
 	}
 
 	return page;
diff --git a/mm/madvise.c b/mm/madvise.c
index 416a56b8e757..a8d8d48a57fe 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -226,7 +226,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		struct address_space *mapping)
 {
 	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
-	pgoff_t end_index = end / PAGE_SIZE;
+	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
 	struct page *page;
 
 	rcu_read_lock();
@@ -1231,8 +1231,6 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
 		ret = total_len - iov_iter_count(&iter);
 
 	mmput(mm);
-	return ret;
-
 release_task:
 	put_task_struct(task);
 put_pid:
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3a24e3b619f5..29459a6ce1c7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -867,8 +867,13 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
 	rcu_read_lock();
 	memcg = mem_cgroup_from_obj(p);
 
-	/* Untracked pages have no memcg, no lruvec. Update only the node */
-	if (!memcg || memcg == root_mem_cgroup) {
+	/*
+	 * Untracked pages have no memcg, no lruvec. Update only the
+	 * node. If we reparent the slab objects to the root memcg,
+	 * when we free the slab object, we need to update the per-memcg
+	 * vmstats to keep it correct for the root memcg.
+	 */
+	if (!memcg) {
 		__mod_node_page_state(pgdat, idx, val);
 	} else {
 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
@@ -4110,11 +4115,17 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 			   (u64)memsw * PAGE_SIZE);
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+		unsigned long nr;
+
 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
+		nr = memcg_page_state(memcg, memcg1_stats[i]);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		if (memcg1_stats[i] == NR_ANON_THPS)
+			nr *= HPAGE_PMD_NR;
+#endif
 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
-			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
-			   PAGE_SIZE);
+						(u64)nr * PAGE_SIZE);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -5339,17 +5350,22 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 		memcg->swappiness = mem_cgroup_swappiness(parent);
 		memcg->oom_kill_disable = parent->oom_kill_disable;
 	}
-	if (parent && parent->use_hierarchy) {
+	if (!parent) {
+		page_counter_init(&memcg->memory, NULL);
+		page_counter_init(&memcg->swap, NULL);
+		page_counter_init(&memcg->kmem, NULL);
+		page_counter_init(&memcg->tcpmem, NULL);
+	} else if (parent->use_hierarchy) {
 		memcg->use_hierarchy = true;
 		page_counter_init(&memcg->memory, &parent->memory);
 		page_counter_init(&memcg->swap, &parent->swap);
 		page_counter_init(&memcg->kmem, &parent->kmem);
 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
 	} else {
-		page_counter_init(&memcg->memory, NULL);
-		page_counter_init(&memcg->swap, NULL);
-		page_counter_init(&memcg->kmem, NULL);
-		page_counter_init(&memcg->tcpmem, NULL);
+		page_counter_init(&memcg->memory, &root_mem_cgroup->memory);
+		page_counter_init(&memcg->swap, &root_mem_cgroup->swap);
+		page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
+		page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem);
 		/*
 		 * Deeper hierachy with use_hierarchy == false doesn't make
 		 * much sense so let cgroup subsystem know about this
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c0bb186bba62..5d880d4eb9a2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1057,27 +1057,25 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (!PageHuge(hpage)) {
 		unmap_success = try_to_unmap(hpage, ttu);
 	} else {
-		/*
-		 * For hugetlb pages, try_to_unmap could potentially call
-		 * huge_pmd_unshare.  Because of this, take semaphore in
-		 * write mode here and set TTU_RMAP_LOCKED to indicate we
-		 * have taken the lock at this higer level.
-		 *
-		 * Note that the call to hugetlb_page_mapping_lock_write
-		 * is necessary even if mapping is already set.  It handles
-		 * ugliness of potentially having to drop page lock to obtain
-		 * i_mmap_rwsem.
-		 */
-		mapping = hugetlb_page_mapping_lock_write(hpage);
-
-		if (mapping) {
-			unmap_success = try_to_unmap(hpage,
+		if (!PageAnon(hpage)) {
+			/*
+			 * For hugetlb pages in shared mappings, try_to_unmap
+			 * could potentially call huge_pmd_unshare.  Because of
+			 * this, take semaphore in write mode here and set
+			 * TTU_RMAP_LOCKED to indicate we have taken the lock
+			 * at this higer level.
+			 */
+			mapping = hugetlb_page_mapping_lock_write(hpage);
+			if (mapping) {
+				unmap_success = try_to_unmap(hpage,
 						     ttu|TTU_RMAP_LOCKED);
-			i_mmap_unlock_write(mapping);
+				i_mmap_unlock_write(mapping);
+			} else {
+				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
+				unmap_success = false;
+			}
 		} else {
-			pr_info("Memory failure: %#lx: could not find mapping for mapped huge page\n",
-				pfn);
-			unmap_success = false;
+			unmap_success = try_to_unmap(hpage, ttu);
 		}
 	}
 	if (!unmap_success)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b44d4c7ba73b..63b2e46b6555 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -350,24 +350,6 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 	return err;
 }
 
-#ifdef CONFIG_NUMA
-int __weak memory_add_physaddr_to_nid(u64 start)
-{
-	pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
-			start);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-
-int __weak phys_to_target_node(u64 start)
-{
-	pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
-			start);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(phys_to_target_node);
-#endif
-
 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 				     unsigned long start_pfn,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3fde772ef5ef..3ca4898f3f24 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -525,7 +525,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	unsigned long flags = qp->flags;
 	int ret;
 	bool has_unmovable = false;
-	pte_t *pte;
+	pte_t *pte, *mapped_pte;
 	spinlock_t *ptl;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
@@ -539,7 +539,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
@@ -571,7 +571,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		} else
 			break;
 	}
-	pte_unmap_unlock(pte - 1, ptl);
+	pte_unmap_unlock(mapped_pte, ptl);
 	cond_resched();
 
 	if (has_unmovable)
diff --git a/mm/memremap.c b/mm/memremap.c
index 73a206d0f645..16b2fb482da1 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -41,28 +41,24 @@ EXPORT_SYMBOL_GPL(memremap_compat_align);
 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 EXPORT_SYMBOL(devmap_managed_key);
 
-static void devmap_managed_enable_put(void)
+static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 {
-	static_branch_dec(&devmap_managed_key);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
+	    pgmap->type == MEMORY_DEVICE_FS_DAX)
+		static_branch_dec(&devmap_managed_key);
 }
 
-static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
+static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
-	if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
-	    (!pgmap->ops || !pgmap->ops->page_free)) {
-		WARN(1, "Missing page_free method\n");
-		return -EINVAL;
-	}
-
-	static_branch_inc(&devmap_managed_key);
-	return 0;
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
+	    pgmap->type == MEMORY_DEVICE_FS_DAX)
+		static_branch_inc(&devmap_managed_key);
 }
 #else
-static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
+static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
-	return -EINVAL;
 }
-static void devmap_managed_enable_put(void)
+static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 {
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
@@ -169,7 +165,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 		pageunmap_range(pgmap, i);
 
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
-	devmap_managed_enable_put();
+	devmap_managed_enable_put(pgmap);
 }
 EXPORT_SYMBOL_GPL(memunmap_pages);
 
@@ -307,7 +303,6 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		.pgprot = PAGE_KERNEL,
 	};
 	const int nr_range = pgmap->nr_range;
-	bool need_devmap_managed = true;
 	int error, i;
 
 	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
@@ -323,6 +318,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 			WARN(1, "Missing migrate_to_ram method\n");
 			return ERR_PTR(-EINVAL);
 		}
+		if (!pgmap->ops->page_free) {
+			WARN(1, "Missing page_free method\n");
+			return ERR_PTR(-EINVAL);
+		}
 		if (!pgmap->owner) {
 			WARN(1, "Missing owner\n");
 			return ERR_PTR(-EINVAL);
@@ -336,11 +335,9 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		}
 		break;
 	case MEMORY_DEVICE_GENERIC:
-		need_devmap_managed = false;
 		break;
 	case MEMORY_DEVICE_PCI_P2PDMA:
 		params.pgprot = pgprot_noncached(params.pgprot);
-		need_devmap_managed = false;
 		break;
 	default:
 		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
@@ -364,11 +361,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		}
 	}
 
-	if (need_devmap_managed) {
-		error = devmap_managed_enable_get(pgmap);
-		if (error)
-			return ERR_PTR(error);
-	}
+	devmap_managed_enable_get(pgmap);
 
 	/*
 	 * Clear the pgmap nr_range as it will be incremented for each
diff --git a/mm/migrate.c b/mm/migrate.c
index 5ca5842df5db..5795cb82e27c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1328,34 +1328,38 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		goto put_anon;
 
 	if (page_mapped(hpage)) {
-		/*
-		 * try_to_unmap could potentially call huge_pmd_unshare.
-		 * Because of this, take semaphore in write mode here and
-		 * set TTU_RMAP_LOCKED to let lower levels know we have
-		 * taken the lock.
-		 */
-		mapping = hugetlb_page_mapping_lock_write(hpage);
-		if (unlikely(!mapping))
-			goto unlock_put_anon;
+		bool mapping_locked = false;
+		enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK|
+					TTU_IGNORE_ACCESS;
+
+		if (!PageAnon(hpage)) {
+			/*
+			 * In shared mappings, try_to_unmap could potentially
+			 * call huge_pmd_unshare.  Because of this, take
+			 * semaphore in write mode here and set TTU_RMAP_LOCKED
+			 * to let lower levels know we have taken the lock.
+			 */
+			mapping = hugetlb_page_mapping_lock_write(hpage);
+			if (unlikely(!mapping))
+				goto unlock_put_anon;
+
+			mapping_locked = true;
+			ttu |= TTU_RMAP_LOCKED;
+		}
 
-		try_to_unmap(hpage,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
-			TTU_RMAP_LOCKED);
+		try_to_unmap(hpage, ttu);
 		page_was_mapped = 1;
-		/*
-		 * Leave mapping locked until after subsequent call to
-		 * remove_migration_ptes()
-		 */
+
+		if (mapping_locked)
+			i_mmap_unlock_write(mapping);
 	}
 
 	if (!page_mapped(hpage))
 		rc = move_to_new_page(new_hpage, hpage, mode);
 
-	if (page_was_mapped) {
+	if (page_was_mapped)
 		remove_migration_ptes(hpage,
-			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, true);
-		i_mmap_unlock_write(mapping);
-	}
+			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
 
 unlock_put_anon:
 	unlock_page(new_hpage);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7709f0e223f5..586042472ac9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2754,12 +2754,6 @@ int test_clear_page_writeback(struct page *page)
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
-	/*
-	 * NOTE: Page might be free now! Writeback doesn't hold a page
-	 * reference on its own, it relies on truncation to wait for
-	 * the clearing of PG_writeback. The below can only access
-	 * page state that is static across allocation cycles.
-	 */
 	if (ret) {
 		dec_lruvec_state(lruvec, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 23f5066bd4a5..eaa227a479e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5103,6 +5103,11 @@ refill:
 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
 			goto refill;
 
+		if (unlikely(nc->pfmemalloc)) {
+			free_the_page(page, compound_order(page));
+			goto refill;
+		}
+
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 		/* if size can vary use size else just use PAGE_SIZE */
 		size = nc->size;
diff --git a/mm/percpu.c b/mm/percpu.c
index 66a93f096394..ad7a37ee74ef 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1315,8 +1315,8 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 	region_size = ALIGN(start_offset + map_size, lcm_align);
 
 	/* allocate chunk */
-	alloc_size = sizeof(struct pcpu_chunk) +
-		BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long);
+	alloc_size = struct_size(chunk, populated,
+				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
 	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
 	if (!chunk)
 		panic("%s: Failed to allocate %zu bytes\n", __func__,
@@ -2521,8 +2521,8 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
 	pcpu_atom_size = ai->atom_size;
-	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
-		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
+	pcpu_chunk_struct_size = struct_size(chunk, populated,
+					     BITS_TO_LONGS(pcpu_unit_pages));
 
 	pcpu_stats_save_ai(ai);
diff --git a/mm/rmap.c b/mm/rmap.c
index 1b84945d655c..31b29321adfe 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1413,9 +1413,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		/*
 		 * If sharing is possible, start and end will be adjusted
 		 * accordingly.
-		 *
-		 * If called for a huge page, caller must hold i_mmap_rwsem
-		 * in write mode as it is possible to call huge_pmd_unshare.
 		 */
 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
 						     &range.end);
@@ -1462,7 +1459,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
 		address = pvmw.address;
 
-		if (PageHuge(page)) {
+		if (PageHuge(page) && !PageAnon(page)) {
 			/*
 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
 			 * held in write mode.  Caller needs to explicitly
diff --git a/mm/slub.c b/mm/slub.c
index b30be2385d1c..34dcc09e2ec9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2852,7 +2852,7 @@ redo:
 
 	object = c->freelist;
 	page = c->page;
-	if (unlikely(!object || !node_match(page, node))) {
+	if (unlikely(!object || !page || !node_match(page, node))) {
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
diff --git a/mm/truncate.c b/mm/truncate.c
index 18cec39a9f53..960edf5803ca 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -528,7 +528,7 @@ void truncate_inode_pages_final(struct address_space *mapping)
 }
 EXPORT_SYMBOL(truncate_inode_pages_final);
 
-unsigned long __invalidate_mapping_pages(struct address_space *mapping,
+static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
 {
 	pgoff_t indices[PAGEVEC_SIZE];
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1b8f0e059767..7b4e31eac2cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1516,7 +1516,8 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
 			TTU_IGNORE_ACCESS, &stat, true);
 	list_splice(&clean_pages, page_list);
-	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed);
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
+			    -(long)nr_reclaimed);
 	/*
 	 * Since lazyfree pages are isolated from file LRU from the beginning,
 	 * they will rotate back to anonymous LRU in the end if it failed to
@@ -1526,7 +1527,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
 			    stat.nr_lazyfree_fail);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-			    -stat.nr_lazyfree_fail);
+			    -(long)stat.nr_lazyfree_fail);
 	return nr_reclaimed;
 }
