[IA64] eliminate race condition in smp_flush_tlb_mm
A race condition exists within smp_call_function_many() when called from smp_flush_tlb_mm(). On rare occasions the cpu_vm_mask can be cleared while smp_call_function_many() is executing, occasionally resulting in a hung process.

Make a copy of the mask prior to calling smp_call_function_many().

Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
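For reference, a minimal sketch of the copy-the-mask-before-sending-IPIs pattern that the patch below adopts. The cpumask helpers (alloc_cpumask_var, cpumask_copy, mm_cpumask, smp_call_function_many, free_cpumask_var) are real kernel APIs; the wrapper function name here is illustrative only, and the authoritative change is the diff further down.

	/* Illustrative sketch only; the actual change is in the diff below. */
	static void flush_mm_on_other_cpus(struct mm_struct *mm)
	{
		cpumask_var_t cpus;

		if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
			/* No memory for a private copy: fall back to an IPI to all CPUs. */
			smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
					  mm, 1);
		} else {
			/*
			 * Snapshot mm_cpumask(mm) first.  Another CPU may clear bits
			 * in the live mask while smp_call_function_many() is walking
			 * it, which is the race this commit eliminates.
			 */
			cpumask_copy(cpus, mm_cpumask(mm));
			smp_call_function_many(cpus,
				(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
			free_cpumask_var(cpus);
		}
	}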
parent 90a8a73c06
commit 75c1c91cb9
1 changed file with 10 additions and 3 deletions
arch/ia64/kernel/smp.c

@@ -293,6 +293,7 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	cpumask_var_t cpus;
 	preempt_disable();
 	/* this happens for the common case of a single-threaded fork(): */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -301,9 +302,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		preempt_enable();
 		return;
 	}
-
-	smp_call_function_many(mm_cpumask(mm),
-		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
+			mm, 1);
+	} else {
+		cpumask_copy(cpus, mm_cpumask(mm));
+		smp_call_function_many(cpus,
+			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+		free_cpumask_var(cpus);
+	}
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);
 	local_irq_enable();