Diffstat (limited to 'mm')
 mm/kasan/report.c   | 4 +++-
 mm/ksm.c            | 6 +++---
 mm/memory-failure.c | 4 ++++
 mm/vmscan.c         | 8 ++++++++
 mm/zsmalloc.c       | 3 +++
 5 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b0877035491f8..62c01b4527eba 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -399,7 +399,9 @@ static void print_address_description(void *addr, u8 tag,
 	}
 
 	if (is_vmalloc_addr(addr)) {
-		pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
+		pr_err("The buggy address belongs to a");
+		if (!vmalloc_dump_obj(addr))
+			pr_cont(" vmalloc virtual mapping\n");
 		page = vmalloc_to_page(addr);
 	}
 
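The hunk above replaces a single pr_err() with a prefix that vmalloc_dump_obj() may complete, falling back to pr_cont() only when no tracking data was printed. A minimal userspace sketch of that fallback-print pattern; dump_obj() is a stand-in for vmalloc_dump_obj(), not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for vmalloc_dump_obj(): returns true only if it printed a
 * full description of the address (here it never does). */
static bool dump_obj(const void *addr)
{
	(void)addr;
	return false;
}

int main(void)
{
	const void *addr = (const void *)0x1234;

	/* Print the prefix, give the dumper a chance to finish the line,
	 * and complete it generically only if the dumper printed nothing. */
	printf("The buggy address belongs to a");
	if (!dump_obj(addr))
		printf(" vmalloc virtual mapping\n");
	return 0;
}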
diff --git a/mm/ksm.c b/mm/ksm.c
index 8583fb91ef136..a9d3e719e0899 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -3669,10 +3669,10 @@ static ssize_t advisor_mode_show(struct kobject *kobj,
 {
 	const char *output;
 
-	if (ksm_advisor == KSM_ADVISOR_NONE)
-		output = "[none] scan-time";
-	else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
 		output = "none [scan-time]";
+	else
+		output = "[none] scan-time";
 
 	return sysfs_emit(buf, "%s\n", output);
 }
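The rewrite keeps the sysfs convention of bracketing the active value while guaranteeing that output is always assigned: the old if/else-if chain left it unset for any value outside the two known modes. A compilable sketch of the fixed selection, with the enum mirroring the kernel's KSM_ADVISOR_* values:

#include <stdio.h>

enum ksm_advisor_type { KSM_ADVISOR_NONE, KSM_ADVISOR_SCAN_TIME };

/* Mirror of the fixed logic: scan-time is bracketed when active, and
 * every other value falls through to the "none" string. */
static const char *advisor_mode_str(enum ksm_advisor_type advisor)
{
	return advisor == KSM_ADVISOR_SCAN_TIME ? "none [scan-time]"
						: "[none] scan-time";
}

int main(void)
{
	printf("%s\n", advisor_mode_str(KSM_ADVISOR_NONE));      /* [none] scan-time */
	printf("%s\n", advisor_mode_str(KSM_ADVISOR_SCAN_TIME)); /* none [scan-time] */
	return 0;
}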
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b91a33fb6c694..225dddff091d7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1561,6 +1561,10 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
 	return ret;
 }
 
+/*
+ * The caller must guarantee the folio isn't a large folio, except for
+ * hugetlb; try_to_unmap() can't handle large folios.
+ */
 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
 {
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3783e45bfc92e..d3bce4d7a3398 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1128,6 +1128,14 @@ retry:
 			goto keep;
 
 		if (folio_contain_hwpoisoned_page(folio)) {
+			/*
+			 * unmap_poisoned_folio() can't handle large
+			 * folios, so just skip them here; memory_failure()
+			 * will handle them if the UCE is triggered again.
+			 */
+			if (folio_test_large(folio))
+				goto keep_locked;
+
 			unmap_poisoned_folio(folio, folio_pfn(folio), false);
 			folio_unlock(folio);
 			folio_put(folio);
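This hunk enforces, on the reclaim side, the contract documented above unmap_poisoned_folio(): large folios must not reach try_to_unmap() from this path. A userspace sketch of the guard, with a toy struct folio and a helper standing in for the kernel's folio_test_large() and hwpoison state:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two folio properties the guard cares about. */
struct folio {
	bool large;
	bool hwpoisoned;
};

static bool folio_test_large(const struct folio *f)
{
	return f->large;
}

/* Mirror of the reclaim-side decision: only small poisoned folios may
 * be handed to unmap_poisoned_folio(); large ones are kept locked and
 * left for memory_failure() to deal with on the next UCE. */
static bool may_unmap_poisoned(const struct folio *f)
{
	return f->hwpoisoned && !folio_test_large(f);
}

int main(void)
{
	struct folio base = { .large = false, .hwpoisoned = true };
	struct folio thp  = { .large = true,  .hwpoisoned = true };

	printf("base folio:  %s\n", may_unmap_poisoned(&base) ? "unmap" : "keep locked");
	printf("large folio: %s\n", may_unmap_poisoned(&thp) ? "unmap" : "keep locked");
	return 0;
}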
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d14a7e317ac8b..03fe0452e6e2e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1053,6 +1053,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	if (!zspage)
 		return NULL;
 
+	if (!IS_ENABLED(CONFIG_COMPACTION))
+		gfp &= ~__GFP_MOVABLE;
+
 	zspage->magic = ZSPAGE_MAGIC;
 	zspage->pool = pool;
 	zspage->class = class->index;
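With CONFIG_COMPACTION disabled the kernel cannot migrate zsmalloc pages, so tagging them __GFP_MOVABLE would only put unmovable allocations in movable pageblocks; the hunk strips the flag at allocation time. A userspace sketch of the compile-time gating; the macros here model, but are not, the kernel's IS_ENABLED() and GFP definitions:

#include <stdio.h>

#define CONFIG_COMPACTION 0		/* set to 1 to emulate compaction support */
#define __GFP_MOVABLE (1u << 3)		/* modelled bit, not the kernel value */

int main(void)
{
	unsigned int gfp = __GFP_MOVABLE;

	/* Stand-in for IS_ENABLED(CONFIG_COMPACTION): drop the movable
	 * hint when nothing can migrate the pages later anyway. */
	if (!CONFIG_COMPACTION)
		gfp &= ~__GFP_MOVABLE;

	printf("allocating %s pages\n",
	       gfp & __GFP_MOVABLE ? "movable" : "unmovable");
	return 0;
}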