diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 1e6a51cc25c4..99b8d991126f 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -106,32 +106,6 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
 	prev = decode_cpu(old);
 	node->prev = prev;
-
-	/*
-	 * We need to avoid reordering of link updation sequence of osq.
-	 * A case in which the status of optimistic spin queue is
-	 * CPU6->CPU2 in which CPU6 has acquired the lock. At this point
-	 * if CPU0 comes in to acquire osq_lock, it will update the tail
-	 * count. After tail count update if CPU2 starts to unqueue itself
-	 * from optimistic spin queue, it will find updated tail count with
-	 * CPU0 and update CPU2 node->next to NULL in osq_wait_next(). If
-	 * reordering of following stores happen then prev->next where prev
-	 * being CPU2 would be updated to point to CPU0 node:
-	 * node->prev = prev;
-	 * WRITE_ONCE(prev->next, node);
-	 *
-	 * At this point if next instruction
-	 * WRITE_ONCE(next->prev, prev);
-	 * in CPU2 path is committed before the update of CPU0 node->prev =
-	 * prev then CPU0 node->prev will point to CPU6 node. At this point
-	 * if CPU0 path's node->prev = prev is committed resulting in change
-	 * of CPU0 prev back to CPU2 node. CPU2 node->next is NULL, so if
-	 * CPU0 gets into unqueue path of osq_lock it will keep spinning
-	 * in infinite loop as condition prev->next == node will never be
-	 * true.
-	 */
-	smp_mb();
-
 	WRITE_ONCE(prev->next, node);
 
 	/*
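The comment being removed above describes a publish-before-initialise hazard: the locking CPU must make its node->prev = prev store visible before the WRITE_ONCE(prev->next, node) store that links the node into the queue, otherwise a concurrently unqueueing CPU can observe and rewrite a half-initialised node, and the two CPUs' writes to the same ->prev field can land in the wrong order. As an illustration only (this is not the kernel code, and all names such as qnode, locker and unqueuer are hypothetical), here is a minimal user-space C11 sketch of the publish-ordering half of that story. A release fence stands in for the store-store ordering the barrier provides; the removed patch argued for a full smp_mb() so the stores are also ordered against the unqueue path's writes.

/* mp_publish.c: user-space sketch, not the kernel code.
 * Build: cc -std=c11 -pthread mp_publish.c
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct qnode {
	struct qnode *prev;		/* plain field, as in the osq node */
	_Atomic(struct qnode *) next;
};

static struct qnode prev_node, new_node;

/* Locker: initialise node->prev, then publish the node via prev->next.
 * Without the fence, the two stores may become visible in the opposite
 * order, which is the reordering the removed comment walks through. */
static void *locker(void *arg)
{
	(void)arg;
	new_node.prev = &prev_node;			/* node->prev = prev */
	atomic_thread_fence(memory_order_release);	/* store-store order */
	atomic_store_explicit(&prev_node.next, &new_node,
			      memory_order_relaxed);	/* publish the node */
	return NULL;
}

/* Unqueuer: spin until a successor is published, then walk its links.
 * The acquire load pairs with the release fence above, so ->prev is
 * guaranteed to be initialised once ->next is observed non-NULL. */
static void *unqueuer(void *arg)
{
	struct qnode *next;

	(void)arg;
	while (!(next = atomic_load_explicit(&prev_node.next,
					     memory_order_acquire)))
		;
	printf("next->prev = %p (expect %p)\n",
	       (void *)next->prev, (void *)&prev_node);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, locker, NULL);
	pthread_create(&b, NULL, unqueuer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

With the fence in place, an unqueuer that sees prev_node.next != NULL always reads an initialised next->prev; drop the fence and the window the removed comment describes with CPU0, CPU2 and CPU6 reopens on weakly ordered hardware.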