| author | Paolo Bonzini <pbonzini@redhat.com> | 2025-10-18 10:25:43 +0200 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2025-10-18 10:25:43 +0200 |
| commit | 4361f5aa8bfcecbab3fc8db987482b9e08115a6a | |
| tree | 7fbee6760544a527348a11775c286105e47f73d2 | |
| parent | 5d26eaae15fb5c190164362a3e19081935574efc | |
| parent | 505f5224b197b77169c977e747cbc18b222f85f9 | |
Merge tag 'kvm-x86-fixes-6.18-rc2' of https://github.com/kvm-x86/linux into HEAD
KVM x86 fixes for 6.18:
 - Expand the KVM_PRE_FAULT_MEMORY selftest to add a regression test for the
   bug fixed by commit 3ccbf6f47098 ("KVM: x86/mmu: Return -EAGAIN if userspace
   deletes/moves memslot during prefault"); see the retry sketch after this
   list.
 - Don't try to get PMU capabilities from perf when running on a system with
   hybrid CPUs/PMUs, as perf will rightly WARN.
 - Rework KVM_CAP_GUEST_MEMFD_MMAP (newly introduced in 6.18) into a more
   generic KVM_CAP_GUEST_MEMFD_FLAGS; a usage sketch follows this list.
 - Add a guest_memfd INIT_SHARED flag and require userspace to explicitly set
   said flag to initialize memory as SHARED, irrespective of MMAP.  The
   behavior merged in 6.18 is that enabling mmap() implicitly initializes
   memory as SHARED, which would result in an ABI collision for x86 CoCo VMs
   as their memory is currently always initialized PRIVATE.
 - Allow mmap() on guest_memfd for x86 CoCo VMs, i.e. on VMs with private
   memory, to enable testing such setups and hopefully flush out any other
   lurking ABI issues before 6.18 is officially released.
 - Add testcases to the guest_memfd selftest to cover guest_memfd without MMAP,
   and host userspace accesses to mmap()'d private memory.
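
To make the reworked API concrete, here is a minimal userspace sketch (not taken from the patch itself) of probing KVM_CAP_GUEST_MEMFD_FLAGS and creating a guest_memfd accordingly. It assumes a kernel and linux/kvm.h new enough to define the flags, and a previously created VM fd; error handling is elided.

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Create a guest_memfd, opting in to whatever flags the VM supports.
 * vm_fd is an already-created KVM VM file descriptor (setup elided). */
static int create_guest_memfd(int vm_fd, uint64_t size)
{
	struct kvm_create_guest_memfd args = { .size = size };
	/* KVM_CAP_GUEST_MEMFD_FLAGS returns the mask of supported flags,
	 * not a boolean like the old KVM_CAP_GUEST_MEMFD_MMAP did. */
	int supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_MEMFD_FLAGS);

	if (supported & GUEST_MEMFD_FLAG_MMAP)
		args.flags |= GUEST_MEMFD_FLAG_MMAP;

	/* INIT_SHARED must be requested explicitly; MMAP alone no longer
	 * implies shared memory (x86 CoCo VMs won't advertise it). */
	if (supported & GUEST_MEMFD_FLAG_INIT_SHARED)
		args.flags |= GUEST_MEMFD_FLAG_INIT_SHARED;

	return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &args);
}
```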
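The first bullet's regression test drives KVM_PRE_FAULT_MEMORY while a memslot is concurrently deleted. A hedged sketch of the userspace retry contract, using an illustrative helper rather than the selftest's actual code: -EINTR means simply retry, while -EAGAIN now signals that a memslot covering the range was deleted or moved mid-prefault, so the caller should revalidate its memslot layout before retrying.

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stdint.h>

/* Prefault [gpa, gpa + size) for a vCPU; returns 0 on success, -1 with
 * errno set otherwise.  KVM decrements range.size as bytes are processed. */
static int prefault_range(int vcpu_fd, uint64_t gpa, uint64_t size)
{
	struct kvm_pre_fault_memory range = { .gpa = gpa, .size = size };
	int ret;

	do {
		ret = ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range);
	} while (ret < 0 && errno == EINTR);

	/* errno == EAGAIN here means a memslot covering the range was
	 * deleted or moved while prefaulting; revalidate and retry. */
	return ret;
}
```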
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | Documentation/virt/kvm/api.rst | 15 |
| -rw-r--r-- | arch/x86/kvm/pmu.c | 8 |
| -rw-r--r-- | arch/x86/kvm/x86.c | 7 |
| -rw-r--r-- | include/linux/kvm_host.h | 12 |
| -rw-r--r-- | include/uapi/linux/kvm.h | 5 |
| -rw-r--r-- | tools/testing/selftests/kvm/guest_memfd_test.c | 169 |
| -rw-r--r-- | tools/testing/selftests/kvm/include/kvm_util.h | 25 |
| -rw-r--r-- | tools/testing/selftests/kvm/include/test_util.h | 19 |
| -rw-r--r-- | tools/testing/selftests/kvm/lib/kvm_util.c | 44 |
| -rw-r--r-- | tools/testing/selftests/kvm/lib/test_util.c | 7 |
| -rw-r--r-- | tools/testing/selftests/kvm/mmu_stress_test.c | 5 |
| -rw-r--r-- | tools/testing/selftests/kvm/pre_fault_memory_test.c | 131 |
| -rw-r--r-- | tools/testing/selftests/kvm/s390/ucontrol_test.c | 16 |
| -rw-r--r-- | tools/testing/selftests/kvm/set_memory_region_test.c | 17 |
| -rw-r--r-- | virt/kvm/Kconfig | 1 |
| -rw-r--r-- | virt/kvm/guest_memfd.c | 75 |
| -rw-r--r-- | virt/kvm/kvm_main.c | 4 |

17 files changed, 375 insertions(+), 185 deletions(-)
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 4973c74db5c6..57061fa29e6a 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6437,9 +6437,18 @@ most one mapping per page, i.e. binding multiple memory regions to a single
 guest_memfd range is not allowed (any number of memory regions can be bound to
 a single guest_memfd file, but the bound ranges must not overlap).
 
-When the capability KVM_CAP_GUEST_MEMFD_MMAP is supported, the 'flags' field
-supports GUEST_MEMFD_FLAG_MMAP.  Setting this flag on guest_memfd creation
-enables mmap() and faulting of guest_memfd memory to host userspace.
+The capability KVM_CAP_GUEST_MEMFD_FLAGS enumerates the `flags` that can be
+specified via KVM_CREATE_GUEST_MEMFD.  Currently defined flags:
+
+  ============================ ================================================
+  GUEST_MEMFD_FLAG_MMAP        Enable using mmap() on the guest_memfd file
+                               descriptor.
+  GUEST_MEMFD_FLAG_INIT_SHARED Make all memory in the file shared during
+                               KVM_CREATE_GUEST_MEMFD (memory files created
+                               without INIT_SHARED will be marked private).
+                               Shared memory can be faulted into host userspace
+                               page tables. Private memory cannot.
+  ============================ ================================================
 
 When the KVM MMU performs a PFN lookup to service a guest fault and the backing
 guest_memfd has the GUEST_MEMFD_FLAG_MMAP set, then the fault will always be
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 40ac4cb44ed2..487ad19a236e 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -108,16 +108,18 @@ void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
 	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
 
-	perf_get_x86_pmu_capability(&kvm_host_pmu);
-
 	/*
 	 * Hybrid PMUs don't play nice with virtualization without careful
 	 * configuration by userspace, and KVM's APIs for reporting supported
 	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
 	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
 	 */
-	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
 		enable_pmu = false;
+		memset(&kvm_host_pmu, 0, sizeof(kvm_host_pmu));
+	} else {
+		perf_get_x86_pmu_capability(&kvm_host_pmu);
+	}
 
 	if (enable_pmu) {
 		/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42ecd093bb4c..b4b5d2d09634 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13941,10 +13941,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_KVM_GUEST_MEMFD
 /*
- * KVM doesn't yet support mmap() on guest_memfd for VMs with private memory
- * (the private vs. shared tracking needs to be moved into guest_memfd).
+ * KVM doesn't yet support initializing guest_memfd memory as shared for VMs
+ * with private memory (the private vs. shared tracking needs to be moved into
+ * guest_memfd).
  */
-bool kvm_arch_supports_gmem_mmap(struct kvm *kvm)
+bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
 {
 	return !kvm_arch_has_private_mem(kvm);
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index fa36e70df088..5bd76cf394fa 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -729,7 +729,17 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
 #endif
 
 #ifdef CONFIG_KVM_GUEST_MEMFD
-bool kvm_arch_supports_gmem_mmap(struct kvm *kvm);
+bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);
+
+static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
+{
+	u64 flags = GUEST_MEMFD_FLAG_MMAP;
+
+	if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
+		flags |= GUEST_MEMFD_FLAG_INIT_SHARED;
+
+	return flags;
+}
 #endif
 
 #ifndef kvm_arch_has_readonly_mem
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 6efa98a57ec1..52f6000ab020 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -962,7 +962,7 @@ struct kvm_enable_cap {
 #define KVM_CAP_ARM_EL2_E2H0 241
 #define KVM_CAP_RISCV_MP_STATE_RESET 242
 #define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
-#define KVM_CAP_GUEST_MEMFD_MMAP 244
+#define KVM_CAP_GUEST_MEMFD_FLAGS 244
 
 struct kvm_irq_routing_irqchip {
 	__u32 irqchip;
@@ -1599,7 +1599,8 @@ struct kvm_memory_attributes {
 #define KVM_MEMORY_ATTRIBUTE_PRIVATE           (1ULL << 3)
 
 #define KVM_CREATE_GUEST_MEMFD	_IOWR(KVMIO,  0xd4, struct kvm_create_guest_memfd)
-#define GUEST_MEMFD_FLAG_MMAP	(1ULL << 0)
+#define GUEST_MEMFD_FLAG_MMAP		(1ULL << 0)
+#define GUEST_MEMFD_FLAG_INIT_SHARED	(1ULL << 1)
 
 struct kvm_create_guest_memfd {
 	__u64 size;
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index b3ca6737f304..e7d9aeb418d3 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -14,8 +14,6 @@
 #include <linux/bitmap.h>
 #include <linux/falloc.h>
 #include <linux/sizes.h>
-#include <setjmp.h>
-#include <signal.h>
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -24,7 +22,9 @@
 #include "test_util.h"
 #include "ucall_common.h"
 
-static void test_file_read_write(int fd)
+static size_t page_size;
+
+static void test_file_read_write(int fd, size_t total_size)
 {
 	char buf[64];
 
@@ -38,18 +38,22 @@ static void test_file_read_write(int fd)
 		    "pwrite on a guest_mem fd should fail");
 }
 
-static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
+static void test_mmap_cow(int fd, size_t size)
+{
+	void *mem;
+
+	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
+}
+
+static void test_mmap_supported(int fd, size_t total_size)
 {
 	const char val = 0xaa;
 	char *mem;
 	size_t i;
 	int ret;
 
-	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
-
-	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
+	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
 	memset(mem, val, total_size);
 	for (i = 0; i < total_size; i++)
@@ -68,45 +72,37 @@ static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
 	for (i = 0; i < total_size; i++)
 		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
 
-	ret = munmap(mem, total_size);
-	TEST_ASSERT(!ret, "munmap() should succeed.");
-}
-
-static sigjmp_buf jmpbuf;
-void fault_sigbus_handler(int signum)
-{
-	siglongjmp(jmpbuf, 1);
+	kvm_munmap(mem, total_size);
 }
 
-static void test_fault_overflow(int fd, size_t page_size, size_t total_size)
+static void test_fault_sigbus(int fd, size_t accessible_size, size_t map_size)
 {
-	struct sigaction sa_old, sa_new = {
-		.sa_handler = fault_sigbus_handler,
-	};
-	size_t map_size = total_size * 4;
 	const char val = 0xaa;
 	char *mem;
 	size_t i;
-	int ret;
 
-	mem = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
+	mem = kvm_mmap(map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
-	sigaction(SIGBUS, &sa_new, &sa_old);
-	if (sigsetjmp(jmpbuf, 1) == 0) {
-		memset(mem, 0xaa, map_size);
-		TEST_ASSERT(false, "memset() should have triggered SIGBUS.");
-	}
-	sigaction(SIGBUS, &sa_old, NULL);
+	TEST_EXPECT_SIGBUS(memset(mem, val, map_size));
+	TEST_EXPECT_SIGBUS((void)READ_ONCE(mem[accessible_size]));
 
-	for (i = 0; i < total_size; i++)
+	for (i = 0; i < accessible_size; i++)
 		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
 
-	ret = munmap(mem, map_size);
-	TEST_ASSERT(!ret, "munmap() should succeed.");
+	kvm_munmap(mem, map_size);
+}
+
+static void test_fault_overflow(int fd, size_t total_size)
+{
+	test_fault_sigbus(fd, total_size, total_size * 4);
+}
+
+static void test_fault_private(int fd, size_t total_size)
+{
+	test_fault_sigbus(fd, 0, total_size);
 }
 
-static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size)
+static void test_mmap_not_supported(int fd, size_t total_size)
 {
 	char *mem;
 
@@ -117,7 +113,7 @@ static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size)
 	TEST_ASSERT_EQ(mem, MAP_FAILED);
 }
 
-static void test_file_size(int fd, size_t page_size, size_t total_size)
+static void test_file_size(int fd, size_t total_size)
 {
 	struct stat sb;
 	int ret;
@@ -128,7 +124,7 @@ static void test_file_size(int fd, size_t page_size, size_t total_size)
 	TEST_ASSERT_EQ(sb.st_blksize, page_size);
 }
 
-static void test_fallocate(int fd, size_t page_size, size_t total_size)
+static void test_fallocate(int fd, size_t total_size)
 {
 	int ret;
 
@@ -165,7 +161,7 @@ static void test_fallocate(int fd, size_t page_size, size_t total_size)
 	TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
 }
 
-static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
+static void test_invalid_punch_hole(int fd, size_t total_size)
 {
 	struct {
 		off_t offset;
@@ -196,8 +192,7 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
 }
 
 static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
-						  uint64_t guest_memfd_flags,
-						  size_t page_size)
+						  uint64_t guest_memfd_flags)
 {
 	size_t size;
 	int fd;
@@ -214,7 +209,6 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
 {
 	int fd1, fd2, ret;
 	struct stat st1, st2;
-	size_t page_size = getpagesize();
 
 	fd1 = __vm_create_guest_memfd(vm, page_size, 0);
 	TEST_ASSERT(fd1 != -1, "memfd creation should succeed");
@@ -239,9 +233,9 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
 	close(fd1);
 }
 
-static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags)
+static void test_guest_memfd_flags(struct kvm_vm *vm)
 {
-	size_t page_size = getpagesize();
+	uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
 	uint64_t flag;
 	int fd;
 
@@ -260,43 +254,57 @@ static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags)
 	}
 }
 
-static void test_guest_memfd(unsigned long vm_type)
+#define gmem_test(__test, __vm, __flags)				\
+do {									\
+	int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags);	\
+									\
+	test_##__test(fd, page_size * 4);				\
+	close(fd);							\
+} while (0)
+
+static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
 {
-	uint64_t flags = 0;
-	struct kvm_vm *vm;
-	size_t total_size;
-	size_t page_size;
-	int fd;
+	test_create_guest_memfd_multiple(vm);
+	test_create_guest_memfd_invalid_sizes(vm, flags);
 
-	page_size = getpagesize();
-	total_size = page_size * 4;
+	gmem_test(file_read_write, vm, flags);
 
-	vm = vm_create_barebones_type(vm_type);
+	if (flags & GUEST_MEMFD_FLAG_MMAP) {
+		if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
+			gmem_test(mmap_supported, vm, flags);
+			gmem_test(fault_overflow, vm, flags);
+		} else {
+			gmem_test(fault_private, vm, flags);
+		}
 
-	if (vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP))
-		flags |= GUEST_MEMFD_FLAG_MMAP;
+		gmem_test(mmap_cow, vm, flags);
+	} else {
+		gmem_test(mmap_not_supported, vm, flags);
+	}
 
-	test_create_guest_memfd_multiple(vm);
-	test_create_guest_memfd_invalid_sizes(vm, flags, page_size);
+	gmem_test(file_size, vm, flags);
+	gmem_test(fallocate, vm, flags);
+	gmem_test(invalid_punch_hole, vm, flags);
+}
 
-	fd = vm_create_guest_memfd(vm, total_size, flags);
+static void test_guest_memfd(unsigned long vm_type)
+{
+	struct kvm_vm *vm = vm_create_barebones_type(vm_type);
+	uint64_t flags;
 
-	test_file_read_write(fd);
+	test_guest_memfd_flags(vm);
 
-	if (flags & GUEST_MEMFD_FLAG_MMAP) {
-		test_mmap_supported(fd, page_size, total_size);
-		test_fault_overflow(fd, page_size, total_size);
-	} else {
-		test_mmap_not_supported(fd, page_size, total_size);
-	}
+	__test_guest_memfd(vm, 0);
 
-	test_file_size(fd, page_size, total_size);
-	test_fallocate(fd, page_size, total_size);
-	test_invalid_punch_hole(fd, page_size, total_size);
+	flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
+	if (flags & GUEST_MEMFD_FLAG_MMAP)
+		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP);
 
-	test_guest_memfd_flags(vm, flags);
+	/* MMAP should always be supported if INIT_SHARED is supported. */
+	if (flags & GUEST_MEMFD_FLAG_INIT_SHARED)
+		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP |
+				       GUEST_MEMFD_FLAG_INIT_SHARED);
 
-	close(fd);
 	kvm_vm_free(vm);
 }
 
@@ -328,22 +336,26 @@ static void test_guest_memfd_guest(void)
 	size_t size;
 	int fd, i;
 
-	if (!kvm_has_cap(KVM_CAP_GUEST_MEMFD_MMAP))
+	if (!kvm_check_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
 		return;
 
 	vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code);
 
-	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP),
-		    "Default VM type should always support guest_memfd mmap()");
+	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_MMAP,
+		    "Default VM type should support MMAP, supported flags = 0x%x",
+		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
+	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_INIT_SHARED,
+		    "Default VM type should support INIT_SHARED, supported flags = 0x%x",
+		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
 
 	size = vm->page_size;
-	fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP);
+	fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP |
+					     GUEST_MEMFD_FLAG_INIT_SHARED);
 	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);
 
-	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
+	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 	memset(mem, 0xaa, size);
-	munmap(mem, size);
+	kvm_munmap(mem, size);
 
 	virt_pg_map(vm, gpa, gpa);
 	vcpu_args_set(vcpu, 2, gpa, size);
@@ -351,8 +363,7 @@ static void test_guest_memfd_guest(void)
 
 	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
 
-	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
+	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 	for (i = 0; i < size; i++)
 		TEST_ASSERT_EQ(mem[i], 0xff);
 
@@ -366,6 +377,8 @@ int main(int argc, char *argv[])
 
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
 
+	page_size = getpagesize();
+
 	/*
	 * Not all architectures support KVM_CAP_VM_TYPES. However, those that
	 * support guest_memfd have that support for the default VM type.
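
The selftest changes above rely on a new TEST_EXPECT_SIGBUS() macro (added to test_util.h later in this patch). Stripped of the test harness, the underlying pattern is plain sigsetjmp()/siglongjmp(); below is a self-contained sketch, with write_faults() being an illustrative helper, assuming mem is a mapping of guest_memfd memory created without GUEST_MEMFD_FLAG_INIT_SHARED and therefore expected to SIGBUS on access:

```c
#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>

static sigjmp_buf sigbus_jmpbuf;

static void sigbus_handler(int signum)
{
	siglongjmp(sigbus_jmpbuf, 1);
}

/* Returns true if writing the first byte of 'mem' raised SIGBUS. */
static bool write_faults(volatile char *mem)
{
	struct sigaction sa_old, sa_new = { .sa_handler = sigbus_handler };
	bool faulted = false;

	sigaction(SIGBUS, &sa_new, &sa_old);
	if (sigsetjmp(sigbus_jmpbuf, 1) == 0)
		mem[0] = 0;	/* private guest_memfd => SIGBUS expected */
	else
		faulted = true;
	sigaction(SIGBUS, &sa_old, NULL);

	return faulted;
}
```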
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 112d3f443a17..d3f3e455c031 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -286,6 +286,31 @@ static inline bool kvm_has_cap(long cap)
 #define __KVM_SYSCALL_ERROR(_name, _ret) \
 	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
 
+static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
+			       off_t offset)
+{
+	void *mem;
+
+	mem = mmap(NULL, size, prot, flags, fd, offset);
+	TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
+		    (int)(unsigned long)MAP_FAILED));
+
+	return mem;
+}
+
+static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
+{
+	return __kvm_mmap(size, prot, flags, fd, 0);
+}
+
+static inline void kvm_munmap(void *mem, size_t size)
+{
+	int ret;
+
+	ret = munmap(mem, size);
+	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+}
+
 /*
  * Use the "inner", double-underscore macro when reporting errors from within
  * other macros so that the name of ioctl() and not its literal numeric value
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index c6ef895fbd9a..b4872ba8ed12 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -8,6 +8,8 @@
 #ifndef SELFTEST_KVM_TEST_UTIL_H
 #define SELFTEST_KVM_TEST_UTIL_H
 
+#include <setjmp.h>
+#include <signal.h>
 #include <stdlib.h>
 #include <stdarg.h>
 #include <stdbool.h>
@@ -78,6 +80,23 @@ do {									\
 	__builtin_unreachable(); \
 } while (0)
 
+extern sigjmp_buf expect_sigbus_jmpbuf;
+void expect_sigbus_handler(int signum);
+
+#define TEST_EXPECT_SIGBUS(action)						\
+do {										\
+	struct sigaction sa_old, sa_new = {					\
+		.sa_handler = expect_sigbus_handler,				\
+	};									\
+										\
+	sigaction(SIGBUS, &sa_new, &sa_old);					\
+	if (sigsetjmp(expect_sigbus_jmpbuf, 1) == 0) {				\
+		action;								\
+		TEST_FAIL("'%s' should have triggered SIGBUS", #action);	\
+	}									\
+	sigaction(SIGBUS, &sa_old, NULL);					\
+} while (0)
+
 size_t parse_size(const char *size);
 
 int64_t timespec_to_ns(struct timespec ts);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index a35adfebfa23..1a93d6361671 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -741,13 +741,11 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	int ret;
 
 	if (vcpu->dirty_gfns) {
-		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
-		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+		kvm_munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
 		vcpu->dirty_gfns = NULL;
 	}
 
-	ret = munmap(vcpu->run, vcpu_mmap_sz());
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+	kvm_munmap(vcpu->run, vcpu_mmap_sz());
 	ret = close(vcpu->fd);
 	TEST_ASSERT(!ret,  __KVM_SYSCALL_ERROR("close()", ret));
 
@@ -783,20 +781,16 @@ void kvm_vm_release(struct kvm_vm *vmp)
 static void __vm_mem_region_delete(struct kvm_vm *vm,
 				   struct userspace_mem_region *region)
 {
-	int ret;
-
 	rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
 	rb_erase(&region->hva_node, &vm->regions.hva_tree);
 	hash_del(&region->slot_node);
 
 	sparsebit_free(&region->unused_phy_pages);
 	sparsebit_free(&region->protected_phy_pages);
-	ret = munmap(region->mmap_start, region->mmap_size);
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+	kvm_munmap(region->mmap_start, region->mmap_size);
 	if (region->fd >= 0) {
 		/* There's an extra map when using shared memory. */
-		ret = munmap(region->mmap_alias, region->mmap_size);
-		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+		kvm_munmap(region->mmap_alias, region->mmap_size);
 		close(region->fd);
 	}
 	if (region->region.guest_memfd >= 0)
@@ -1053,12 +1047,9 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 		region->fd = kvm_memfd_alloc(region->mmap_size,
 					     src_type == VM_MEM_SRC_SHARED_HUGETLB);
 
-	region->mmap_start = mmap(NULL, region->mmap_size,
-				  PROT_READ | PROT_WRITE,
-				  vm_mem_backing_src_alias(src_type)->flag,
-				  region->fd, 0);
-	TEST_ASSERT(region->mmap_start != MAP_FAILED,
-		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+	region->mmap_start = kvm_mmap(region->mmap_size, PROT_READ | PROT_WRITE,
+				      vm_mem_backing_src_alias(src_type)->flag,
+				      region->fd);
 
 	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
 		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
@@ -1129,12 +1120,10 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 
 	/* If shared memory, create an alias. */
 	if (region->fd >= 0) {
-		region->mmap_alias = mmap(NULL, region->mmap_size,
-					  PROT_READ | PROT_WRITE,
-					  vm_mem_backing_src_alias(src_type)->flag,
-					  region->fd, 0);
-		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
-			    __KVM_SYSCALL_ERROR("mmap()",  (int)(unsigned long)MAP_FAILED));
+		region->mmap_alias = kvm_mmap(region->mmap_size,
+					      PROT_READ | PROT_WRITE,
+					      vm_mem_backing_src_alias(src_type)->flag,
+					      region->fd);
 
 		/* Align host alias address */
 		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
@@ -1344,10 +1333,8 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
 		"smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi",
 		vcpu_mmap_sz(), sizeof(*vcpu->run));
-	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
-		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
-	TEST_ASSERT(vcpu->run != MAP_FAILED,
-		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+	vcpu->run = kvm_mmap(vcpu_mmap_sz(), PROT_READ | PROT_WRITE,
+			     MAP_SHARED, vcpu->fd);
 
 	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
 		vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
@@ -1794,9 +1781,8 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
 			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
 
-		addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
-			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
-		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
+		addr = __kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
+				  page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 
 		vcpu->dirty_gfns = addr;
 		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 03eb99af9b8d..8a1848586a85 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -18,6 +18,13 @@
 
 #include "test_util.h"
 
+sigjmp_buf expect_sigbus_jmpbuf;
+
+void __attribute__((used)) expect_sigbus_handler(int signum)
+{
+	siglongjmp(expect_sigbus_jmpbuf, 1);
+}
+
 /*
  * Random number generator that is usable from guest code. This is the
  * Park-Miller LCG using standard constants.
diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index 6a437d2be9fa..37b7e6524533 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -339,8 +339,7 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
 
 	fd = kvm_memfd_alloc(slot_size, hugepages);
-	mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
+	mem = kvm_mmap(slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
 	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");
 
@@ -413,7 +412,7 @@ int main(int argc, char *argv[])
 	for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
 		vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);
 
-	munmap(mem, slot_size / 2);
+	kvm_munmap(mem, slot_size / 2);
 
 	/* Sanity check that the vCPUs actually ran. */
 	for (i = 0; i < nr_vcpus; i++)
diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c
index 0350a8896a2f..f04768c1d2e4 100644
--- a/tools/testing/selftests/kvm/pre_fault_memory_test.c
+++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c
@@ -10,6 +10,7 @@
 #include <test_util.h>
 #include <kvm_util.h>
 #include <processor.h>
+#include <pthread.h>
 
 /* Arbitrarily chosen values */
 #define TEST_SIZE		(SZ_2M + PAGE_SIZE)
@@ -30,18 +31,66 @@ static void guest_code(uint64_t base_gpa)
 	GUEST_DONE();
 }
 
-static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
-			     u64 left)
+struct slot_worker_data {
+	struct kvm_vm *vm;
+	u64 gpa;
+	uint32_t flags;
+	bool worker_ready;
+	bool prefault_ready;
+	bool recreate_slot;
+};
+
+static void *delete_slot_worker(void *__data)
+{
+	struct slot_worker_data *data = __data;
+	struct kvm_vm *vm = data->vm;
+
+	WRITE_ONCE(data->worker_ready, true);
+
+	while (!READ_ONCE(data->prefault_ready))
+		cpu_relax();
+
+	vm_mem_region_delete(vm, TEST_SLOT);
+
+	while (!READ_ONCE(data->recreate_slot))
+		cpu_relax();
+
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, data->gpa,
+				    TEST_SLOT, TEST_NPAGES, data->flags);
+
+	return NULL;
+}
+
+static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset,
+			     u64 size, u64 expected_left, bool private)
 {
 	struct kvm_pre_fault_memory range = {
-		.gpa = gpa,
+		.gpa = base_gpa + offset,
 		.size = size,
 		.flags = 0,
 	};
-	u64 prev;
+	struct slot_worker_data data = {
+		.vm = vcpu->vm,
+		.gpa = base_gpa,
+		.flags = private ? KVM_MEM_GUEST_MEMFD : 0,
+	};
+	bool slot_recreated = false;
+	pthread_t slot_worker;
 	int ret, save_errno;
+	u64 prev;
+
+	/*
+	 * Concurrently delete (and recreate) the slot to test KVM's handling
+	 * of a racing memslot deletion with prefaulting.
+	 */
+	pthread_create(&slot_worker, NULL, delete_slot_worker, &data);
 
-	do {
+	while (!READ_ONCE(data.worker_ready))
+		cpu_relax();
+
+	WRITE_ONCE(data.prefault_ready, true);
+
+	for (;;) {
 		prev = range.size;
 		ret = __vcpu_ioctl(vcpu, KVM_PRE_FAULT_MEMORY, &range);
 		save_errno = errno;
@@ -49,18 +98,65 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
 			    "%sexpecting range.size to change on %s",
 			    ret < 0 ? "not " : "",
 			    ret < 0 ? "failure" : "success");
-	} while (ret >= 0 ? range.size : save_errno == EINTR);
-
-	TEST_ASSERT(range.size == left,
-		    "Completed with %lld bytes left, expected %" PRId64,
-		    range.size, left);
+		/*
+		 * Immediately retry prefaulting if KVM was interrupted by an
+		 * unrelated signal/event.
+		 */
+		if (ret < 0 && save_errno == EINTR)
+			continue;
+
+		/*
+		 * Tell the worker to recreate the slot in order to complete
+		 * prefaulting (if prefault didn't already succeed before the
+		 * slot was deleted) and/or to prepare for the next testcase.
+		 * Wait for the worker to exit so that the next invocation of
+		 * prefaulting is guaranteed to complete (assuming no KVM bugs).
+		 */
+		if (!slot_recreated) {
+			WRITE_ONCE(data.recreate_slot, true);
+			pthread_join(slot_worker, NULL);
+			slot_recreated = true;
+
+			/*
+			 * Retry prefaulting to get a stable result, i.e. to
+			 * avoid seeing random EAGAIN failures.  Don't retry if
+			 * prefaulting already succeeded, as KVM disallows
+			 * prefaulting with size=0, i.e. blindly retrying would
+			 * result in test failures due to EINVAL.  KVM should
+			 * always return success if all bytes are prefaulted,
+			 * i.e. there is no need to guard against EAGAIN being
+			 * returned.
+			 */
+			if (range.size)
+				continue;
+		}
+
+		/*
+		 * All done if there are no remaining bytes to prefault, or if
+		 * prefaulting failed (EINTR was handled above, and EAGAIN due
+		 * to prefaulting a memslot that's being actively deleted should
+		 * be impossible since the memslot has already been recreated).
+		 */
+		if (!range.size || ret < 0)
+			break;
+	}
 
-	if (left == 0)
-		__TEST_ASSERT_VM_VCPU_IOCTL(!ret, "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
+	TEST_ASSERT(range.size == expected_left,
+		    "Completed with %llu bytes left, expected %lu",
+		    range.size, expected_left);
+
+	/*
+	 * Assert success if prefaulting the entire range should succeed, i.e.
+	 * complete with no bytes remaining.  Otherwise prefaulting should have
+	 * failed due to ENOENT (due to RET_PF_EMULATE for emulated MMIO when
+	 * no memslot exists).
+	 */
+	if (!expected_left)
+		TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_PRE_FAULT_MEMORY, ret, vcpu->vm);
 	else
-		/* No memory slot causes RET_PF_EMULATE. it results in -ENOENT. */
-		__TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
-					    "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
+		TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
+					  KVM_PRE_FAULT_MEMORY, ret, vcpu->vm);
 }
 
 static void __test_pre_fault_memory(unsigned long vm_type, bool private)
@@ -97,9 +193,10 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private)
 	if (private)
 		vm_mem_set_private(vm, guest_test_phys_mem, TEST_SIZE);
 
-	pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, 0);
-	pre_fault_memory(vcpu, guest_test_phys_mem + SZ_2M, PAGE_SIZE * 2, PAGE_SIZE);
-	pre_fault_memory(vcpu, guest_test_phys_mem + TEST_SIZE, PAGE_SIZE, PAGE_SIZE);
+
+	pre_fault_memory(vcpu, guest_test_phys_mem, 0, SZ_2M, 0, private);
+	pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, PAGE_SIZE * 2, PAGE_SIZE, private);
+	pre_fault_memory(vcpu, guest_test_phys_mem, TEST_SIZE, PAGE_SIZE, PAGE_SIZE, private);
 
 	vcpu_args_set(vcpu, 1, guest_test_virt_mem);
 	vcpu_run(vcpu);
diff --git a/tools/testing/selftests/kvm/s390/ucontrol_test.c b/tools/testing/selftests/kvm/s390/ucontrol_test.c
index d265b34c54be..50bc1c38225a 100644
--- a/tools/testing/selftests/kvm/s390/ucontrol_test.c
+++ b/tools/testing/selftests/kvm/s390/ucontrol_test.c
@@ -142,19 +142,17 @@ FIXTURE_SETUP(uc_kvm)
 	self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
 	ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
 		  TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
-	self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
-		    PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
-	ASSERT_NE(self->run, MAP_FAILED);
+	self->run = kvm_mmap(self->kvm_run_size, PROT_READ | PROT_WRITE,
+			     MAP_SHARED, self->vcpu_fd);
 	/**
 	 * For virtual cpus that have been created with S390 user controlled
 	 * virtual machines, the resulting vcpu fd can be memory mapped at page
 	 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
 	 * the virtual cpu's hardware control block.
 	 */
-	self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
-			  PROT_READ | PROT_WRITE, MAP_SHARED,
-			  self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
-	ASSERT_NE(self->sie_block, MAP_FAILED);
+	self->sie_block = __kvm_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE,
+				     MAP_SHARED, self->vcpu_fd,
+				     KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
 
 	TH_LOG("VM created %p %p", self->run, self->sie_block);
 
@@ -186,8 +184,8 @@ FIXTURE_SETUP(uc_kvm)
 
 FIXTURE_TEARDOWN(uc_kvm)
 {
-	munmap(self->sie_block, PAGE_SIZE);
-	munmap(self->run, self->kvm_run_size);
+	kvm_munmap(self->sie_block, PAGE_SIZE);
+	kvm_munmap(self->run, self->kvm_run_size);
 	close(self->vcpu_fd);
 	close(self->vm_fd);
 	close(self->kvm_fd);
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index ce3ac0fd6dfb..7fe427ff9b38 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -433,10 +433,10 @@ static void test_add_max_memory_regions(void)
 
 	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
 		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
-	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
-		   PROT_READ | PROT_WRITE,
-		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+
+	mem = kvm_mmap((size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
+		       PROT_READ | PROT_WRITE,
+		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1);
 	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
 
 	for (slot = 0; slot < max_mem_slots; slot++)
@@ -446,9 +446,8 @@ static void test_add_max_memory_regions(void)
 					  mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);
 
 	/* Check it cannot be added memory slots beyond the limit */
-	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
-			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
+	mem_extra = kvm_mmap(MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
+			     MAP_PRIVATE | MAP_ANONYMOUS, -1);
 
 	ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
 					  (uint64_t)max_mem_slots * MEM_REGION_SIZE,
@@ -456,8 +455,8 @@ static void test_add_max_memory_regions(void)
 	TEST_ASSERT(ret == -1 && errno == EINVAL,
 		    "Adding one more memory slot should fail with EINVAL");
 
-	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
-	munmap(mem_extra, MEM_REGION_SIZE);
+	kvm_munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
+	kvm_munmap(mem_extra, MEM_REGION_SIZE);
 	kvm_vm_free(vm);
 }
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 0227e13cd8dd..5f0015c5dd95 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -113,6 +113,7 @@ config KVM_GENERIC_MEMORY_ATTRIBUTES
        bool
 
 config KVM_GUEST_MEMFD
+       depends on KVM_GENERIC_MMU_NOTIFIER
        select XARRAY_MULTI
        bool
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 94bafd6c558c..fbca8c0972da 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -102,8 +102,17 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
 	return filemap_grab_folio(inode->i_mapping, index);
 }
 
-static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
-				      pgoff_t end)
+static enum kvm_gfn_range_filter kvm_gmem_get_invalidate_filter(struct inode *inode)
+{
+	if ((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED)
+		return KVM_FILTER_SHARED;
+
+	return KVM_FILTER_PRIVATE;
+}
+
+static void __kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
+					pgoff_t end,
+					enum kvm_gfn_range_filter attr_filter)
 {
 	bool flush = false, found_memslot = false;
 	struct kvm_memory_slot *slot;
@@ -118,8 +127,7 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
 			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
 			.slot = slot,
 			.may_block = true,
-			/* guest memfd is relevant to only private mappings. */
-			.attr_filter = KVM_FILTER_PRIVATE,
+			.attr_filter = attr_filter,
 		};
 
 		if (!found_memslot) {
@@ -139,8 +147,21 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
 		KVM_MMU_UNLOCK(kvm);
 }
 
-static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
-				    pgoff_t end)
+static void kvm_gmem_invalidate_begin(struct inode *inode, pgoff_t start,
+				      pgoff_t end)
+{
+	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
+	enum kvm_gfn_range_filter attr_filter;
+	struct kvm_gmem *gmem;
+
+	attr_filter = kvm_gmem_get_invalidate_filter(inode);
+
+	list_for_each_entry(gmem, gmem_list, entry)
+		__kvm_gmem_invalidate_begin(gmem, start, end, attr_filter);
+}
+
+static void __kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
+				      pgoff_t end)
 {
 	struct kvm *kvm = gmem->kvm;
 
@@ -151,12 +172,20 @@ static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
 	}
 }
 
-static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static void kvm_gmem_invalidate_end(struct inode *inode, pgoff_t start,
+				    pgoff_t end)
 {
 	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
+	struct kvm_gmem *gmem;
+
+	list_for_each_entry(gmem, gmem_list, entry)
+		__kvm_gmem_invalidate_end(gmem, start, end);
+}
+
+static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+{
 	pgoff_t start = offset >> PAGE_SHIFT;
 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
-	struct kvm_gmem *gmem;
 
 	/*
 	 * Bindings must be stable across invalidation to ensure the start+end
@@ -164,13 +193,11 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	 */
 	filemap_invalidate_lock(inode->i_mapping);
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_begin(gmem, start, end);
+	kvm_gmem_invalidate_begin(inode, start, end);
 
 	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_end(gmem, start, end);
+	kvm_gmem_invalidate_end(inode, start, end);
 
 	filemap_invalidate_unlock(inode->i_mapping);
 
@@ -280,8 +307,9 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
 	 * Zap all SPTEs pointed at by this file.  Do not free the backing
 	 * memory, as its lifetime is associated with the inode, not the file.
 	 */
-	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
-	kvm_gmem_invalidate_end(gmem, 0, -1ul);
+	__kvm_gmem_invalidate_begin(gmem, 0, -1ul,
+				    kvm_gmem_get_invalidate_filter(inode));
+	__kvm_gmem_invalidate_end(gmem, 0, -1ul);
 
 	list_del(&gmem->entry);
 
@@ -328,6 +356,9 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
 	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
 		return VM_FAULT_SIGBUS;
 
+	if (!((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED))
+		return VM_FAULT_SIGBUS;
+
 	folio = kvm_gmem_get_folio(inode, vmf->pgoff);
 	if (IS_ERR(folio)) {
 		int err = PTR_ERR(folio);
@@ -400,8 +431,6 @@ static int kvm_gmem_migrate_folio(struct address_space *mapping,
 
 static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
 {
-	struct list_head *gmem_list = &mapping->i_private_list;
-	struct kvm_gmem *gmem;
 	pgoff_t start, end;
 
 	filemap_invalidate_lock_shared(mapping);
@@ -409,8 +438,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
 	start = folio->index;
 	end = start + folio_nr_pages(folio);
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_begin(gmem, start, end);
+	kvm_gmem_invalidate_begin(mapping->host, start, end);
 
 	/*
 	 * Do not truncate the range, what action is taken in response to the
@@ -421,8 +449,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
 	 * error to userspace.
 	 */
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_end(gmem, start, end);
+	kvm_gmem_invalidate_end(mapping->host, start, end);
 
 	filemap_invalidate_unlock_shared(mapping);
 
@@ -458,7 +485,7 @@ static const struct inode_operations kvm_gmem_iops = {
 	.setattr	= kvm_gmem_setattr,
 };
 
-bool __weak kvm_arch_supports_gmem_mmap(struct kvm *kvm)
+bool __weak kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
 {
 	return true;
 }
@@ -522,12 +549,8 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
 {
 	loff_t size = args->size;
 	u64 flags = args->flags;
-	u64 valid_flags = 0;
-
-	if (kvm_arch_supports_gmem_mmap(kvm))
-		valid_flags |= GUEST_MEMFD_FLAG_MMAP;
 
-	if (flags & ~valid_flags)
+	if (flags & ~kvm_gmem_get_supported_flags(kvm))
 		return -EINVAL;
 
 	if (size <= 0 || !PAGE_ALIGNED(size))
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 226faeaa8e56..b7a0ae2a7b20 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4928,8 +4928,8 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 #ifdef CONFIG_KVM_GUEST_MEMFD
 	case KVM_CAP_GUEST_MEMFD:
 		return 1;
-	case KVM_CAP_GUEST_MEMFD_MMAP:
-		return !kvm || kvm_arch_supports_gmem_mmap(kvm);
+	case KVM_CAP_GUEST_MEMFD_FLAGS:
+		return kvm_gmem_get_supported_flags(kvm);
 #endif
 	default:
 		break;
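
From a host userspace perspective, the kernel changes above mean: MAP_PRIVATE (copy-on-write) mappings of a guest_memfd fail, MAP_SHARED mappings succeed when GUEST_MEMFD_FLAG_MMAP is set, and faulting the pages in succeeds only if the file was created with GUEST_MEMFD_FLAG_INIT_SHARED. A hedged sketch under those assumptions, with map_and_touch() being an illustrative helper rather than anything in the patch:

```c
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

/* Map a shared guest_memfd and dirty it from host userspace.
 * fd: guest_memfd created with MMAP | INIT_SHARED (creation elided). */
static void *map_and_touch(int fd, size_t size)
{
	/* MAP_PRIVATE (copy-on-write) is rejected by guest_memfd, per the
	 * selftest's test_mmap_cow() expectations... */
	void *cow = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (cow != MAP_FAILED)
		return NULL;	/* unexpected per the selftest's assertions */

	/* ...whereas MAP_SHARED succeeds, and touching the pages works
	 * because INIT_SHARED made the whole file shared at creation. */
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED)
		return NULL;

	memset(mem, 0xaa, size);
	return mem;
}
```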
