FROMLIST: android: binder: Drop lru lock in isolate callback
(from https://patchwork.kernel.org/patch/9945123/)

Drop the global lru lock in the isolate callback before calling
zap_page_range(), which calls cond_resched(), and re-acquire the global
lru lock before returning. Also change the return code to
LRU_REMOVED_RETRY. Use mmput_async() when we fail to acquire the mmap
sem in an atomic context. Fixes "BUG: sleeping function called from
invalid context" errors when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.

Bug: 63926541
Change-Id: I45dbada421b715abed9a66d03d30ae2285671ca1
Fixes: f2517eb76f1f2 ("android: binder: Add global lru shrinker to binder")
Reported-by: Kyle Yan <kyan@codeaurora.org>
Acked-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Sherry Yang <sherryy@android.com>
parent 9cfefbcfaa
commit 849c7764d8

1 changed file with 12 additions and 6 deletions
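For context, a minimal sketch of the locking pattern this patch adopts: an lru-walk isolate callback is entered with the global lru spinlock held, so any work that may sleep (zap_page_range() ends up in cond_resched()) has to happen after list_lru_isolate() and spin_unlock(), with the lock re-taken before returning LRU_REMOVED_RETRY. This is not the binder code itself; struct my_page, struct my_owner and my_unmap_page() are illustrative stand-ins, and the callback signature is the list_lru walker callback used by kernels of this era.

/*
 * Illustrative sketch only -- not the binder driver code.  Shows the
 * shape of an isolate callback that must drop the lru lock around work
 * that can sleep, and reports that with LRU_REMOVED_RETRY.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct my_owner {
	struct mutex mutex;		/* per-owner lock, like alloc->mutex */
};

struct my_page {
	struct list_head lru;
	struct my_owner *owner;
};

/* Stand-in for the sleeping work (zap_page_range() in the real driver). */
static void my_unmap_page(struct my_page *page)
{
	might_sleep();
}

static enum lru_status my_free_page(struct list_head *item,
				    struct list_lru_one *lru,
				    spinlock_t *lock, void *cb_arg)
{
	struct my_page *page = container_of(item, struct my_page, lru);

	/* The lru spinlock is held: only trylock, never sleep here. */
	if (!mutex_trylock(&page->owner->mutex))
		return LRU_SKIP;

	/* Take the item off the list while the lru lock is still held... */
	list_lru_isolate(lru, item);
	/* ...then drop the lock before anything that may sleep. */
	spin_unlock(lock);

	my_unmap_page(page);

	/* Re-acquire the lru lock before returning to the walker. */
	spin_lock(lock);
	mutex_unlock(&page->owner->mutex);

	/* Tells the walker the item was removed but the lock was dropped. */
	return LRU_REMOVED_RETRY;
}

Returning LRU_REMOVED_RETRY rather than LRU_REMOVED matters because the walker cannot trust its list cursor once the lock has been released; it counts the item as isolated and restarts the walk instead.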
drivers/android/binder_alloc.c

@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	struct binder_alloc *alloc;
 	uintptr_t page_addr;
 	size_t index;
+	struct vm_area_struct *vma;
 
 	alloc = page->alloc;
 	if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	if (alloc->vma) {
+	vma = alloc->vma;
+	if (vma) {
 		mm = get_task_mm(alloc->tsk);
 		if (!mm)
 			goto err_get_task_mm_failed;
 		if (!down_write_trylock(&mm->mmap_sem))
 			goto err_down_write_mmap_sem_failed;
+	}
+
+	list_lru_isolate(lru, item);
+	spin_unlock(lock);
 
+	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range(alloc->vma,
+		zap_page_range(vma,
			       page_addr +
			       alloc->user_buffer_offset,
			       PAGE_SIZE, NULL);
@@ -951,13 +958,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	trace_binder_unmap_kernel_end(alloc, index);
 
-	list_lru_isolate(lru, item);
-
+	spin_lock(lock);
 	mutex_unlock(&alloc->mutex);
-	return LRU_REMOVED;
+	return LRU_REMOVED_RETRY;
 
 err_down_write_mmap_sem_failed:
-	mmput(mm);
+	mmput_async(mm);
 err_get_task_mm_failed:
 err_page_already_freed:
 	mutex_unlock(&alloc->mutex);