Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	141
1 file changed, 86 insertions, 55 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4d3b3d60d893..1f46c3b86f9f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -41,6 +41,14 @@
 
 #include "internal.h"
 
+bool is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+EXPORT_SYMBOL(is_vmalloc_addr);
+
 struct vfree_deferred {
 	struct llist_head list;
 	struct work_struct wq;
@@ -1062,6 +1070,26 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 }
 
 /*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
+{
+	/*
+	 * Remove from the busy tree/list.
+	 */
+	spin_lock(&vmap_area_lock);
+	unlink_va(va, &vmap_area_root);
+	spin_unlock(&vmap_area_lock);
+
+	/*
+	 * Insert/Merge it back to the free tree/list.
+	 */
+	spin_lock(&free_vmap_area_lock);
+	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
+	spin_unlock(&free_vmap_area_lock);
+}
+
+/*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
  */
@@ -1073,6 +1101,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	struct vmap_area *va, *pva;
 	unsigned long addr;
 	int purged = 0;
+	int ret;
 
 	BUG_ON(!size);
 	BUG_ON(offset_in_page(size));
@@ -1139,6 +1168,7 @@ retry:
 	va->va_end = addr + size;
 	va->vm = NULL;
+
 	spin_lock(&vmap_area_lock);
 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 	spin_unlock(&vmap_area_lock);
 
@@ -1147,6 +1177,12 @@ retry:
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
+	ret = kasan_populate_vmalloc(addr, size);
+	if (ret) {
+		free_vmap_area(va);
+		return ERR_PTR(ret);
+	}
+
 	return va;
 
 overflow:
@@ -1186,26 +1222,6 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb)
 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
 
 /*
- * Free a region of KVA allocated by alloc_vmap_area
- */
-static void free_vmap_area(struct vmap_area *va)
-{
-	/*
-	 * Remove from the busy tree/list.
-	 */
-	spin_lock(&vmap_area_lock);
-	unlink_va(va, &vmap_area_root);
-	spin_unlock(&vmap_area_lock);
-
-	/*
-	 * Insert/Merge it back to the free tree/list.
-	 */
-	spin_lock(&free_vmap_area_lock);
-	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
-	spin_unlock(&free_vmap_area_lock);
-}
-
-/*
  * Clear the pagetable entries of a given vmap_area
  */
 static void unmap_vmap_area(struct vmap_area *va)
@@ -1375,7 +1391,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
 	unmap_vmap_area(va);
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(va->va_start, va->va_end);
 
 	free_vmap_area_noflush(va);
@@ -1673,7 +1689,7 @@ static void vb_free(const void *addr, unsigned long size)
 
 	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range((unsigned long)addr,
 					(unsigned long)addr + size);
 
@@ -1771,6 +1787,8 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr > VMALLOC_END);
 	BUG_ON(!PAGE_ALIGNED(addr));
 
+	kasan_poison_vmalloc(mem, size);
+
 	if (likely(count <= VMAP_MAX_ALLOC)) {
 		debug_check_no_locks_freed(mem, size);
 		vb_free(mem, size);
@@ -1821,6 +1839,9 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 		addr = va->va_start;
 		mem = (void *)addr;
 	}
+
+	kasan_unpoison_vmalloc(mem, size);
+
 	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
@@ -2075,6 +2096,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 {
 	struct vmap_area *va;
 	struct vm_struct *area;
+	unsigned long requested_size = size;
 
 	BUG_ON(in_interrupt());
 	size = PAGE_ALIGN(size);
@@ -2098,23 +2120,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 		return NULL;
 	}
 
-	setup_vmalloc_vm(area, va, flags, caller);
+	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
 
-	/*
-	 * For KASAN, if we are in vmalloc space, we need to cover the shadow
-	 * area with real memory. If we come here through VM_ALLOC, this is
-	 * done by a higher level function that has access to the true size,
-	 * which might not be a full page.
-	 *
-	 * We assume module space comes via VM_ALLOC path.
-	 */
-	if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
-		if (kasan_populate_vmalloc(area->size, area)) {
-			unmap_vmap_area(va);
-			kfree(area);
-			return NULL;
-		}
-	}
+	setup_vmalloc_vm(area, va, flags, caller);
 
 	return area;
 }
@@ -2293,8 +2301,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-	if (area->flags & VM_KASAN)
-		kasan_poison_vmalloc(area->addr, area->size);
+	kasan_poison_vmalloc(area->addr, area->size);
 
 	vm_remove_mappings(area, deallocate_pages);
 
@@ -2539,7 +2546,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
 		goto fail;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+	area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
 				vm_flags, start, end, node, gfp_mask, caller);
 	if (!area)
 		goto fail;
@@ -2548,11 +2555,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!addr)
 		return NULL;
 
-	if (is_vmalloc_or_module_addr(area->addr)) {
-		if (kasan_populate_vmalloc(real_size, area))
-			return NULL;
-	}
-
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
 	 * flag. It means that vm_struct is not fully initialized.
@@ -3294,7 +3296,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	struct vmap_area **vas, *va;
 	struct vm_struct **vms;
 	int area, area2, last_area, term_area;
-	unsigned long base, start, size, end, last_end;
+	unsigned long base, start, size, end, last_end, orig_start, orig_end;
 	bool purged = false;
 	enum fit_type type;
 
@@ -3424,6 +3426,15 @@ retry:
 
 	spin_unlock(&free_vmap_area_lock);
 
+	/* populate the kasan shadow space */
+	for (area = 0; area < nr_vms; area++) {
+		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+			goto err_free_shadow;
+
+		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
+				       sizes[area]);
+	}
+
 	/* insert all vm's */
 	spin_lock(&vmap_area_lock);
 	for (area = 0; area < nr_vms; area++) {
@@ -3434,12 +3445,6 @@ retry:
 	}
 	spin_unlock(&vmap_area_lock);
 
-	/* populate the shadow space outside of the lock */
-	for (area = 0; area < nr_vms; area++) {
-		/* assume success here */
-		kasan_populate_vmalloc(sizes[area], vms[area]);
-	}
-
 	kfree(vas);
 
 	return vms;
@@ -3451,8 +3456,12 @@ recovery:
 	 * and when pcpu_get_vm_areas() is success.
 	 */
 	while (area--) {
-		merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
-				       &free_vmap_area_list);
+		orig_start = vas[area]->va_start;
+		orig_end = vas[area]->va_end;
+		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+					    &free_vmap_area_list);
+		kasan_release_vmalloc(orig_start, orig_end,
+				      va->va_start, va->va_end);
 		vas[area] = NULL;
 	}
 
@@ -3487,6 +3496,28 @@ err_free2:
 	kfree(vas);
 	kfree(vms);
 	return NULL;
+
+err_free_shadow:
+	spin_lock(&free_vmap_area_lock);
+	/*
+	 * We release all the vmalloc shadows, even the ones for regions that
+	 * hadn't been successfully added. This relies on kasan_release_vmalloc
+	 * being able to tolerate this case.
+	 */
+	for (area = 0; area < nr_vms; area++) {
+		orig_start = vas[area]->va_start;
+		orig_end = vas[area]->va_end;
+		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+					    &free_vmap_area_list);
+		kasan_release_vmalloc(orig_start, orig_end,
+				      va->va_start, va->va_end);
+		vas[area] = NULL;
+		kfree(vms[area]);
+	}
+	spin_unlock(&free_vmap_area_lock);
+	kfree(vas);
+	kfree(vms);
+	return NULL;
 }
 
 /**
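Note: the is_vmalloc_addr() helper added at the top of this diff is a plain half-open range check against [VMALLOC_START, VMALLOC_END). The standalone sketch below restates that check as a userspace C program; the VMALLOC_START/VMALLOC_END values here are placeholder assumptions for illustration, not the arch-specific constants the kernel actually uses. The rest of the diff moves free_vmap_area() above alloc_vmap_area() so that the allocation path can undo its work when kasan_populate_vmalloc() fails.

/*
 * Illustrative userspace sketch (not kernel code): is_vmalloc_addr() as a
 * half-open range check. The two bounds below are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

#define VMALLOC_START	0xffffc90000000000UL	/* placeholder, arch-specific in reality */
#define VMALLOC_END	0xffffe90000000000UL	/* placeholder, arch-specific in reality */

static bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}

int main(void)
{
	/* prints 1: address falls inside the placeholder vmalloc range */
	printf("%d\n", is_vmalloc_addr((void *)0xffffc90000001000UL));
	/* prints 0: address falls outside the range */
	printf("%d\n", is_vmalloc_addr((void *)0x1000UL));
	return 0;
}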
