[PATCH] fix i386 mutex fastpath on FRAME_POINTER && !DEBUG_MUTEXES
Call the mutex slowpath more conservatively - e.g. FRAME_POINTERS can
change the calling convention, in which case a direct branch to the
slowpath becomes illegal. Bug found by Hugh Dickins.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 042c904c3e
commit 73165b88ff

2 changed files with 14 additions and 11 deletions
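Why the old code was wrong, in brief: the fastpath hands count to the slowpath in %eax and used to conditionally branch ("js "#fail_fn") straight into the slowpath function; as the commit message notes, FRAME_POINTER can change the calling convention, making that direct branch illegal, so the patch enters the slowpath through a real "call" in an out-of-line stub. Below is a minimal user-space sketch of that pattern, not part of the commit: it assumes 32-bit x86 Linux with GCC (build with "gcc -m32"), fastpath_lock() and lock_slowpath() are hypothetical stand-ins for __mutex_fastpath_lock() and the fail_fn, and the GAS ".subsection" directive plays the role of LOCK_SECTION_START("")/LOCK_SECTION_END.

/*
 * Illustration only - not part of the commit.
 * Assumes 32-bit x86 Linux, GCC: gcc -m32 demo.c
 */
#include <stdio.h>

/* An ordinary C function: with frame pointers enabled it gets a normal
 * prologue, so it must be entered via "call", never a direct branch.
 * regparm(1) makes it take its first argument in %eax, mirroring the
 * kernel's fastcall convention on i386. */
__attribute__((regparm(1)))
void lock_slowpath(int *count)
{
	printf("slowpath: count fell to %d\n", *count);
}

void fastpath_lock(int *count)
{
	unsigned int dummy;

	__asm__ __volatile__(
		"lock; decl (%%eax)	\n"	/* 1 -> 0 is the fastpath  */
		"	js 2f		\n"	/* went negative: contended */
		"1:			\n"
		".subsection 1		\n"	/* out-of-line stub, akin to
						   LOCK_SECTION_START("") */
		"2:	call lock_slowpath \n"	/* a real call works with any
						   callee prologue */
		"	jmp 1b		\n"
		".subsection 0		\n"	/* akin to LOCK_SECTION_END */
		: "=a" (dummy)
		: "a" (count)			/* argument arrives in %eax */
		: "memory", "ecx", "edx");
	(void)dummy;
}

int main(void)
{
	int count = 1;

	fastpath_lock(&count);	/* uncontended: no output */
	fastpath_lock(&count);	/* contended: slowpath prints */
	return 0;
}

The hot path keeps only the locked decrement and an untaken branch; the call and the jump back live out of line, which is the shape the mutex.h hunks below introduce.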
include/asm-i386/mutex.h
@@ -28,7 +28,13 @@ do {								\
 								\
 	__asm__ __volatile__(					\
 		LOCK	"   decl (%%eax)	\n"		\
-			"   js "#fail_fn"	\n"		\
+			"   js 2f		\n"		\
+			"1:			\n"		\
+								\
+		LOCK_SECTION_START("")				\
+			"2: call "#fail_fn"	\n"		\
+			"   jmp 1b		\n"		\
+		LOCK_SECTION_END				\
 								\
 		:"=a" (dummy)					\
 		: "a" (count)					\
@@ -78,7 +84,13 @@ do {								\
 								\
 	__asm__ __volatile__(					\
 		LOCK	"   incl (%%eax)	\n"		\
-			"   jle "#fail_fn"	\n"		\
+			"   jle 2f		\n"		\
+			"1:			\n"		\
+								\
+		LOCK_SECTION_START("")				\
+			"2: call "#fail_fn"	\n"		\
+			"   jmp 1b		\n"		\
+		LOCK_SECTION_END				\
 								\
 		:"=a" (dummy)					\
 		: "a" (count)					\
kernel/mutex.c
@@ -84,12 +84,6 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 	/*
 	 * The locking fastpath is the 1->0 transition from
 	 * 'unlocked' into 'locked' state.
-	 *
-	 * NOTE: if asm/mutex.h is included, then some architectures
-	 * rely on mutex_lock() having _no other code_ here but this
-	 * fastpath. That allows the assembly fastpath to do
-	 * tail-merging optimizations. (If you want to put testcode
-	 * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 }
@@ -115,8 +109,6 @@ void fastcall __sched mutex_unlock(struct mutex *lock)
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
-	 *
-	 * NOTE: no other code must be here - see mutex_lock() .
 	 */
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
@@ -261,7 +253,6 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
  */
 int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 {
-	/* NOTE: no other code must be here - see mutex_lock() */
 	return __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
 }