author     David Reaver <me@davidreaver.com>              2025-01-12 07:26:55 -0800
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2025-02-18 20:39:24 +0100
commit     d42044aad6528e0c9533dbaf836d1b0fbb19fe2d (patch)
tree       0fa890031566f3911761307370aed21f28dbb4ff
parent     0ad2507d5d93f39619fc42372c347d6006b64319 (diff)
PM: hibernate: Replace deprecated kmap_atomic() with kmap_local_page()
kmap_atomic() is deprecated and should be replaced with kmap_local_page()
[1][2].

kmap_local_page() is faster in kernels with HIGHMEM enabled, can take page
faults, and allows preemption.

According to [2], this replacement is safe as long as the code between
kmap_atomic() and kunmap_atomic() does not implicitly depend on disabling
page faults or preemption. In all of the call sites in this patch, the only
thing happening between mapping and unmapping pages is copy_page() calls,
and I don't suspect they depend on disabling page faults or preemption.

Link: https://lwn.net/Articles/836144/ [1]
Link: https://docs.kernel.org/mm/highmem.html#temporary-virtual-mappings [2]
Signed-off-by: David Reaver <me@davidreaver.com>
Link: https://patch.msgid.link/20250112152658.20132-1-me@davidreaver.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
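For reference, a minimal sketch of the conversion pattern this patch applies.
It is not taken from snapshot.c itself; the helper name do_copy_one_page()
and its page/buffer parameters are placeholders used only for illustration:

    #include <linux/highmem.h>
    #include <asm/page.h>

    /* Hypothetical helper; not part of the patch. */
    static void do_copy_one_page(struct page *page, void *buffer)
    {
    	void *kaddr;

    	/* Before: kmap_atomic() also disables page faults and preemption. */
    	kaddr = kmap_atomic(page);
    	copy_page(buffer, kaddr);
    	kunmap_atomic(kaddr);

    	/*
    	 * After: the mapping is local to the current task/CPU, but page
    	 * faults and preemption stay enabled, so the mapped section must
    	 * not rely on them being disabled.
    	 */
    	kaddr = kmap_local_page(page);
    	copy_page(buffer, kaddr);
    	kunmap_local(kaddr);
    }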
-rw-r--r--  kernel/power/snapshot.c | 16
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c9fb559a6399..4e6e24e8b854 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -2270,9 +2270,9 @@ int snapshot_read_next(struct snapshot_handle *handle)
*/
void *kaddr;
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_page(page);
copy_page(buffer, kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
handle->buffer = buffer;
} else {
handle->buffer = page_address(page);
@@ -2561,9 +2561,9 @@ static void copy_last_highmem_page(void)
if (last_highmem_page) {
void *dst;
- dst = kmap_atomic(last_highmem_page);
+ dst = kmap_local_page(last_highmem_page);
copy_page(dst, buffer);
- kunmap_atomic(dst);
+ kunmap_local(dst);
last_highmem_page = NULL;
}
}
@@ -2881,13 +2881,13 @@ static inline void swap_two_pages_data(struct page *p1, struct page *p2,
{
void *kaddr1, *kaddr2;
- kaddr1 = kmap_atomic(p1);
- kaddr2 = kmap_atomic(p2);
+ kaddr1 = kmap_local_page(p1);
+ kaddr2 = kmap_local_page(p2);
copy_page(buf, kaddr1);
copy_page(kaddr1, kaddr2);
copy_page(kaddr2, buf);
- kunmap_atomic(kaddr2);
- kunmap_atomic(kaddr1);
+ kunmap_local(kaddr2);
+ kunmap_local(kaddr1);
}
/**
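A note on the swap_two_pages_data() hunk above: local kmaps nest like a
stack, so when more than one page is mapped, kunmap_local() must be called
in reverse order of the kmap_local_page() calls, which the patch preserves
(kaddr2 is released before kaddr1). A minimal standalone sketch of that rule,
with hypothetical page pointers pa and pb:

    void *va = kmap_local_page(pa);
    void *vb = kmap_local_page(pb);	/* nested mapping */
    /* ... work on va and vb ... */
    kunmap_local(vb);			/* unmap in reverse (LIFO) order */
    kunmap_local(va);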