diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7e31c5d3b3cb..30818930331f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -413,6 +413,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	BUG_ON(offset_in_page(size));
 	BUG_ON(!is_power_of_2(align));
 
+	might_sleep();
+
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
@@ -632,7 +634,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
  * by this look, but we want to avoid concurrent calls for performance
  * reasons and to make the pcpu_get_vm_areas more deterministic.
  */
-static DEFINE_SPINLOCK(vmap_purge_lock);
+static DEFINE_MUTEX(vmap_purge_lock);
 
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
@@ -686,9 +688,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
  */
 static void try_purge_vmap_area_lazy(void)
 {
-	if (spin_trylock(&vmap_purge_lock)) {
+	if (mutex_trylock(&vmap_purge_lock)) {
 		__purge_vmap_area_lazy(ULONG_MAX, 0);
-		spin_unlock(&vmap_purge_lock);
+		mutex_unlock(&vmap_purge_lock);
 	}
 }
 
@@ -697,10 +699,10 @@ static void try_purge_vmap_area_lazy(void)
  */
 static void purge_vmap_area_lazy(void)
 {
-	spin_lock(&vmap_purge_lock);
+	mutex_lock(&vmap_purge_lock);
 	purge_fragmented_blocks_allcpus();
 	__purge_vmap_area_lazy(ULONG_MAX, 0);
-	spin_unlock(&vmap_purge_lock);
+	mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -1063,6 +1065,8 @@ void vm_unmap_aliases(void)
 	if (unlikely(!vmap_initialized))
 		return;
 
+	might_sleep();
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
@@ -1087,11 +1091,11 @@ void vm_unmap_aliases(void)
 		rcu_read_unlock();
 	}
 
-	spin_lock(&vmap_purge_lock);
+	mutex_lock(&vmap_purge_lock);
 	purge_fragmented_blocks_allcpus();
 	if (!__purge_vmap_area_lazy(start, end) && flush)
 		flush_tlb_kernel_range(start, end);
-	spin_unlock(&vmap_purge_lock);
+	mutex_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
@@ -1106,6 +1110,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	unsigned long addr = (unsigned long)mem;
 	struct vmap_area *va;
 
+	might_sleep();
 	BUG_ON(!addr);
 	BUG_ON(addr < VMALLOC_START);
 	BUG_ON(addr > VMALLOC_END);
@@ -1495,6 +1500,8 @@ struct vm_struct *remove_vm_area(const void *addr)
 {
 	struct vmap_area *va;
 
+	might_sleep();
+
 	va = find_vmap_area((unsigned long)addr);
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->vm;
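
With vmap_purge_lock turned into a mutex, any path that can reach __purge_vmap_area_lazy() is allowed to sleep, and the added might_sleep() annotations make that contract explicit at the entry points (alloc_vmap_area, vm_unmap_aliases, vm_unmap_ram, remove_vm_area). The value of the annotation is that, with CONFIG_DEBUG_ATOMIC_SLEEP enabled, a caller in atomic context is reported on every call rather than only on the occasional call that actually blocks on the mutex. As a rough sketch of the kind of caller this catches (hypothetical out-of-tree code, not part of the patch; demo_lock and demo_flush_aliases are made-up names):

	/* Hypothetical driver snippet, for illustration only. */
	#include <linux/spinlock.h>
	#include <linux/vmalloc.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_flush_aliases(void)
	{
		spin_lock(&demo_lock);		/* atomic context from here on */
		vm_unmap_aliases();		/* might_sleep() flags this under CONFIG_DEBUG_ATOMIC_SLEEP */
		spin_unlock(&demo_lock);
	}

Note that mutex_trylock() never sleeps, which is why try_purge_vmap_area_lazy() can keep its opportunistic trylock pattern after the conversion, while purge_vmap_area_lazy() and vm_unmap_aliases() take the mutex unconditionally and therefore require process context.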