author    Jiri Kosina <jkosina@suse.cz>   2021-06-30 09:02:28 +0200
committer Jiri Kosina <jkosina@suse.cz>   2021-06-30 09:02:28 +0200
commit    5a94296bc02ac616336da7b5332b86d2ca8827f0 (patch)
tree      d13c4e7db2b4b01adb87fbf2ea5bafd2dc920b2f /mm/internal.h
parent    3b770932eefb7c0c6319d332023efee87eb12913 (diff)
parent    24a31ea94922d391a96a9dd0a9a830de65423817 (diff)
Merge branch 'for-5.14/amd-sfh' into for-linus
- support for Renoir and Cezanne SoCs
- support for Ambient Light Sensor
- support for Human Presence Detection sensor
all from Basavaraj Natikar
Diffstat (limited to 'mm/internal.h')
-rw-r--r--   mm/internal.h | 57
1 file changed, 31 insertions, 26 deletions
diff --git a/mm/internal.h b/mm/internal.h
index bbe900f9f095a..2f1182948aa6e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -96,26 +96,6 @@ static inline void set_page_refcounted(struct page *page)
 	set_page_count(page, 1);
 }
 
-/*
- * When kernel touch the user page, the user page may be have been marked
- * poison but still mapped in user space, if without this page, the kernel
- * can guarantee the data integrity and operation success, the kernel is
- * better to check the posion status and avoid touching it, be good not to
- * panic, coredump for process fatal signal is a sample case matching this
- * scenario. Or if kernel can't guarantee the data integrity, it's better
- * not to call this function, let kernel touch the poison page and get to
- * panic.
- */
-static inline bool is_page_poisoned(struct page *page)
-{
-	if (PageHWPoison(page))
-		return true;
-	else if (PageHuge(page) && PageHWPoison(compound_head(page)))
-		return true;
-
-	return false;
-}
-
 extern unsigned long highest_memmap_pfn;
 
 /*
@@ -145,10 +125,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
  *
  * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
@@ -244,7 +224,13 @@ struct compact_control {
 	unsigned int nr_freepages;	/* Number of isolated free pages */
 	unsigned int nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	/*
+	 * Acts as an in/out parameter to page isolation for migration.
+	 * isolate_migratepages uses it as a search base.
+	 * isolate_migratepages_block will update the value to the next pfn
+	 * after the last isolated one.
+	 */
+	unsigned long migrate_pfn;
 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
 	struct zone *zone;
 	unsigned long total_migrate_scanned;
@@ -280,7 +266,7 @@ struct capture_control {
 unsigned long
 isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
-unsigned long
+int
 isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
 int find_suitable_fallback(struct free_area *area, unsigned int order,
@@ -328,7 +314,7 @@ static inline bool is_exec_mapping(vm_flags_t flags)
 }
 
 /*
- * Stack area - atomatically grows in one direction
+ * Stack area - automatically grows in one direction
  *
  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
  * do_mmap() forbids all other combinations.
@@ -446,7 +432,9 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-
+static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
+{
+}
 #endif /* !CONFIG_MMU */
 
 /*
@@ -637,4 +625,21 @@ struct migration_target_control {
 	gfp_t gfp_mask;
 };
 
+/*
+ * mm/vmalloc.c
+ */
+#ifdef CONFIG_MMU
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift);
+#else
+static inline
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift)
+{
+	return -EINVAL;
+}
+#endif
+
+void vunmap_range_noflush(unsigned long start, unsigned long end);
+
 #endif /* __MM_INTERNAL_H */
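The new comment on migrate_pfn describes an in/out contract: the caller seeds it with a search base, and isolate_migratepages_block() advances it past the last pfn it scanned, so the next call resumes there. A minimal standalone sketch of that contract, with simplified fields and a hypothetical isolate_range() standing in for isolate_migratepages_block() (this is not the kernel's actual compaction loop):

```c
#include <stdio.h>

/* Simplified stand-in for struct compact_control. */
struct compact_control {
	unsigned long migrate_pfn;	/* in: search base; out: next pfn to scan */
	unsigned long free_pfn;		/* scan stops when migrate_pfn reaches this */
};

/*
 * Hypothetical stand-in for isolate_migratepages_block(): scans one
 * range and advances cc->migrate_pfn past the last pfn it looked at,
 * so the next call resumes where this one stopped.
 */
static int isolate_range(struct compact_control *cc, unsigned long block_end)
{
	int nr_isolated = 0;

	while (cc->migrate_pfn < block_end) {
		/* ... decide whether the page at migrate_pfn is movable ... */
		nr_isolated++;
		cc->migrate_pfn++;	/* out-parameter: records scan progress */
	}
	return nr_isolated;
}

int main(void)
{
	struct compact_control cc = { .migrate_pfn = 0, .free_pfn = 1024 };

	/* The caller seeds migrate_pfn once; each call resumes from it. */
	while (cc.migrate_pfn < cc.free_pfn) {
		unsigned long block_end = cc.migrate_pfn + 512;
		int n = isolate_range(&cc, block_end);

		printf("isolated %d pages, resume at pfn %lu\n",
		       n, cc.migrate_pfn);
	}
	return 0;
}
```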
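The final hunk follows a common kernel header idiom: under CONFIG_MMU the header only declares vmap_pages_range_noflush() (the real definition lives in mm/vmalloc.c), while the !CONFIG_MMU branch supplies a static inline stub returning -EINVAL so callers compile either way. A self-contained sketch of the same idiom outside the kernel, where FEATURE_X and do_feature() are made-up names, not kernel API:

```c
#include <errno.h>
#include <stdio.h>

/* Toggle this like CONFIG_MMU to switch between real and stub versions. */
#define FEATURE_X 1

#if FEATURE_X
/* Feature enabled: the header only declares the real function. */
int do_feature(unsigned long addr, unsigned long end);
#else
/* Feature disabled: an inline stub keeps callers compiling unchanged. */
static inline int do_feature(unsigned long addr, unsigned long end)
{
	return -EINVAL;
}
#endif

#if FEATURE_X
/* Definition that would normally live in a separate .c file. */
int do_feature(unsigned long addr, unsigned long end)
{
	return (int)(end - addr);
}
#endif

int main(void)
{
	/* Callers need no #ifdefs; the stub simply reports -EINVAL. */
	printf("do_feature: %d\n", do_feature(0, 16));
	return 0;
}
```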