Diffstat (limited to 'tools/testing/selftests/kvm/guest_memfd_test.c')
 -rw-r--r--  tools/testing/selftests/kvm/guest_memfd_test.c | 169
 1 file changed, 91 insertions(+), 78 deletions(-)
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index b3ca6737f304..e7d9aeb418d3 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -14,8 +14,6 @@
 #include <linux/bitmap.h>
 #include <linux/falloc.h>
 #include <linux/sizes.h>
-#include <setjmp.h>
-#include <signal.h>
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -24,7 +22,9 @@
 #include "test_util.h"
 #include "ucall_common.h"
 
-static void test_file_read_write(int fd)
+static size_t page_size;
+
+static void test_file_read_write(int fd, size_t total_size)
 {
 	char buf[64];
 
@@ -38,18 +38,22 @@ static void test_file_read_write(int fd)
 		    "pwrite on a guest_mem fd should fail");
 }
 
-static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
+static void test_mmap_cow(int fd, size_t size)
+{
+	void *mem;
+
+	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
+}
+
+static void test_mmap_supported(int fd, size_t total_size)
 {
 	const char val = 0xaa;
 	char *mem;
 	size_t i;
 	int ret;
 
-	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
-
-	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
+	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
 	memset(mem, val, total_size);
 	for (i = 0; i < total_size; i++)
@@ -68,45 +72,37 @@ static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
 	for (i = 0; i < total_size; i++)
 		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
 
-	ret = munmap(mem, total_size);
-	TEST_ASSERT(!ret, "munmap() should succeed.");
-}
-
-static sigjmp_buf jmpbuf;
-void fault_sigbus_handler(int signum)
-{
-	siglongjmp(jmpbuf, 1);
+	kvm_munmap(mem, total_size);
 }
 
-static void test_fault_overflow(int fd, size_t page_size, size_t total_size)
+static void test_fault_sigbus(int fd, size_t accessible_size, size_t map_size)
 {
-	struct sigaction sa_old, sa_new = {
-		.sa_handler = fault_sigbus_handler,
-	};
-	size_t map_size = total_size * 4;
 	const char val = 0xaa;
 	char *mem;
 	size_t i;
-	int ret;
 
-	mem = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
+	mem = kvm_mmap(map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
-	sigaction(SIGBUS, &sa_new, &sa_old);
-	if (sigsetjmp(jmpbuf, 1) == 0) {
-		memset(mem, 0xaa, map_size);
-		TEST_ASSERT(false, "memset() should have triggered SIGBUS.");
-	}
-	sigaction(SIGBUS, &sa_old, NULL);
+	TEST_EXPECT_SIGBUS(memset(mem, val, map_size));
+	TEST_EXPECT_SIGBUS((void)READ_ONCE(mem[accessible_size]));
 
-	for (i = 0; i < total_size; i++)
+	for (i = 0; i < accessible_size; i++)
 		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
 
-	ret = munmap(mem, map_size);
-	TEST_ASSERT(!ret, "munmap() should succeed.");
+	kvm_munmap(mem, map_size);
+}
+
+static void test_fault_overflow(int fd, size_t total_size)
+{
+	test_fault_sigbus(fd, total_size, total_size * 4);
+}
+
+static void test_fault_private(int fd, size_t total_size)
+{
+	test_fault_sigbus(fd, 0, total_size);
 }
 
-static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size)
+static void test_mmap_not_supported(int fd, size_t total_size)
 {
 	char *mem;
 
@@ -117,7 +113,7 @@ static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size)
 	TEST_ASSERT_EQ(mem, MAP_FAILED);
 }
 
-static void test_file_size(int fd, size_t page_size, size_t total_size)
+static void test_file_size(int fd, size_t total_size)
 {
 	struct stat sb;
 	int ret;
@@ -128,7 +124,7 @@ static void test_file_size(int fd, size_t page_size, size_t total_size)
 	TEST_ASSERT_EQ(sb.st_blksize, page_size);
 }
 
-static void test_fallocate(int fd, size_t page_size, size_t total_size)
+static void test_fallocate(int fd, size_t total_size)
 {
 	int ret;
 
@@ -165,7 +161,7 @@ static void test_fallocate(int fd, size_t page_size, size_t total_size)
 	TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
 }
 
-static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
+static void test_invalid_punch_hole(int fd, size_t total_size)
 {
 	struct {
 		off_t offset;
@@ -196,8 +192,7 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
 }
 
 static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
-						  uint64_t guest_memfd_flags,
-						  size_t page_size)
+						  uint64_t guest_memfd_flags)
 {
 	size_t size;
 	int fd;
@@ -214,7 +209,6 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
 {
 	int fd1, fd2, ret;
 	struct stat st1, st2;
-	size_t page_size = getpagesize();
 
 	fd1 = __vm_create_guest_memfd(vm, page_size, 0);
 	TEST_ASSERT(fd1 != -1, "memfd creation should succeed");
@@ -239,9 +233,9 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
 	close(fd1);
 }
 
-static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags)
+static void test_guest_memfd_flags(struct kvm_vm *vm)
 {
-	size_t page_size = getpagesize();
+	uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
 	uint64_t flag;
 	int fd;
 
@@ -260,43 +254,57 @@ static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags)
 	}
 }
 
-static void test_guest_memfd(unsigned long vm_type)
+#define gmem_test(__test, __vm, __flags)				\
+do {									\
+	int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags);	\
+									\
+	test_##__test(fd, page_size * 4);				\
+	close(fd);							\
+} while (0)
+
+static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
 {
-	uint64_t flags = 0;
-	struct kvm_vm *vm;
-	size_t total_size;
-	size_t page_size;
-	int fd;
+	test_create_guest_memfd_multiple(vm);
+	test_create_guest_memfd_invalid_sizes(vm, flags);
 
-	page_size = getpagesize();
-	total_size = page_size * 4;
+	gmem_test(file_read_write, vm, flags);
 
-	vm = vm_create_barebones_type(vm_type);
+	if (flags & GUEST_MEMFD_FLAG_MMAP) {
+		if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
+			gmem_test(mmap_supported, vm, flags);
+			gmem_test(fault_overflow, vm, flags);
+		} else {
+			gmem_test(fault_private, vm, flags);
+		}
 
-	if (vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP))
-		flags |= GUEST_MEMFD_FLAG_MMAP;
+		gmem_test(mmap_cow, vm, flags);
+	} else {
+		gmem_test(mmap_not_supported, vm, flags);
+	}
 
-	test_create_guest_memfd_multiple(vm);
-	test_create_guest_memfd_invalid_sizes(vm, flags, page_size);
+	gmem_test(file_size, vm, flags);
+	gmem_test(fallocate, vm, flags);
+	gmem_test(invalid_punch_hole, vm, flags);
+}
 
-	fd = vm_create_guest_memfd(vm, total_size, flags);
+static void test_guest_memfd(unsigned long vm_type)
+{
+	struct kvm_vm *vm = vm_create_barebones_type(vm_type);
+	uint64_t flags;
 
-	test_file_read_write(fd);
+	test_guest_memfd_flags(vm);
 
-	if (flags & GUEST_MEMFD_FLAG_MMAP) {
-		test_mmap_supported(fd, page_size, total_size);
-		test_fault_overflow(fd, page_size, total_size);
-	} else {
-		test_mmap_not_supported(fd, page_size, total_size);
-	}
+	__test_guest_memfd(vm, 0);
 
-	test_file_size(fd, page_size, total_size);
-	test_fallocate(fd, page_size, total_size);
-	test_invalid_punch_hole(fd, page_size, total_size);
+	flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
+	if (flags & GUEST_MEMFD_FLAG_MMAP)
+		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP);
 
-	test_guest_memfd_flags(vm, flags);
+	/* MMAP should always be supported if INIT_SHARED is supported. */
+	if (flags & GUEST_MEMFD_FLAG_INIT_SHARED)
+		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP |
+				       GUEST_MEMFD_FLAG_INIT_SHARED);
 
-	close(fd);
 	kvm_vm_free(vm);
 }
 
@@ -328,22 +336,26 @@ static void test_guest_memfd_guest(void)
 	size_t size;
 	int fd, i;
 
-	if (!kvm_has_cap(KVM_CAP_GUEST_MEMFD_MMAP))
+	if (!kvm_check_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
 		return;
 
 	vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code);
 
-	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP),
-		    "Default VM type should always support guest_memfd mmap()");
+	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_MMAP,
+		    "Default VM type should support MMAP, supported flags = 0x%x",
+		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
+	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_INIT_SHARED,
+		    "Default VM type should support INIT_SHARED, supported flags = 0x%x",
+		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
 
 	size = vm->page_size;
-	fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP);
+	fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP |
+					     GUEST_MEMFD_FLAG_INIT_SHARED);
 	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);
 
-	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
+	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 	memset(mem, 0xaa, size);
-	munmap(mem, size);
+	kvm_munmap(mem, size);
 
 	virt_pg_map(vm, gpa, gpa);
 	vcpu_args_set(vcpu, 2, gpa, size);
@@ -351,8 +363,7 @@ static void test_guest_memfd_guest(void)
 
 	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
 
-	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
+	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
 	for (i = 0; i < size; i++)
 		TEST_ASSERT_EQ(mem[i], 0xff);
@@ -366,6 +377,8 @@ int main(int argc, char *argv[])
 
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
 
+	page_size = getpagesize();
+
 	/*
 	 * Not all architectures support KVM_CAP_VM_TYPES. However, those that
 	 * support guest_memfd have that support for the default VM type.
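
Note on the new gmem_test() macro added by this patch: it token-pastes the first argument onto "test_", so a call such as gmem_test(mmap_supported, vm, flags) expands roughly as follows (illustrative expansion only, using the names from the patch above):

	/* Approximate expansion of gmem_test(mmap_supported, vm, flags). */
	do {
		/* Create a fresh 4-page guest_memfd with the flags under test. */
		int fd = vm_create_guest_memfd(vm, page_size * 4, flags);

		test_mmap_supported(fd, page_size * 4);
		close(fd);
	} while (0);

In other words, each test case runs against its own newly created guest_memfd, and the fd is closed when the test returns, so the flag combinations exercised by __test_guest_memfd() do not leak state between tests.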
