MIPS: Fix gigaton of warnings building with microMIPS.

With binutils 2.24, attempting to switch from microMIPS mode to MIPS III
mode through .set mips3 results in *lots* of warnings like

{standard input}: Assembler messages:
{standard input}:397: Warning: the 64-bit MIPS architecture does not support the `smartmips' extension

during a kernel build.  Fixed by using .set arch=r4000 instead.

This breaks support for building the kernel with binutils 2.13, which was
only supported for 32-bit kernels anyway, and with 2.14, which was a bad
vintage for MIPS.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Ralf Baechle 2014-03-30 13:20:10 +02:00
parent 30ee615bb8
commit a809d46066
18 changed files with 84 additions and 84 deletions
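The change is mechanical across all 18 files: every ISA-level override used to reach the 64-bit instruction set is replaced by an equivalent architecture override. As a rough sketch of the before/after pattern (a hypothetical fragment for illustration, not copied from any one file below):

	# Before: bumps the ISA level; under microMIPS, binutils 2.24
	# then warns, as quoted above, that the 64-bit MIPS architecture
	# does not support extensions such as SmartMIPS.
	.set	push
	.set	mips3
	ll	t0, 0(a0)
	sc	t0, 0(a0)
	.set	pop

	# After: selects the R4000 architecture instead, which makes the
	# same instructions available without the ISA-level warning.
	.set	push
	.set	arch=r4000
	ll	t0, 0(a0)
	sc	t0, 0(a0)
	.set	pop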

@@ -95,7 +95,7 @@ LEAF(alchemy_sleep_au1000)
 /* cache following instructions, as memory gets put to sleep */
 la t0, 1f
- .set mips3
+ .set arch=r4000
 cache 0x14, 0(t0)
 cache 0x14, 32(t0)
 cache 0x14, 64(t0)
@@ -121,7 +121,7 @@ LEAF(alchemy_sleep_au1550)
 /* cache following instructions, as memory gets put to sleep */
 la t0, 1f
- .set mips3
+ .set arch=r4000
 cache 0x14, 0(t0)
 cache 0x14, 32(t0)
 cache 0x14, 64(t0)
@@ -163,7 +163,7 @@ LEAF(alchemy_sleep_au1300)
 la t1, 4f
 subu t2, t1, t0
- .set mips3
+ .set arch=r4000
 1: cache 0x14, 0(t0)
 subu t2, t2, 32

@@ -146,7 +146,7 @@ symbol = value
 #define PREF(hint,addr) \
 .set push; \
- .set mips4; \
+ .set arch=r5000; \
 pref hint, addr; \
 .set pop
@@ -159,7 +159,7 @@ symbol = value
 #define PREFX(hint,addr) \
 .set push; \
- .set mips4; \
+ .set arch=r5000; \
 prefx hint, addr; \
 .set pop

@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 int temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %0, %1 # atomic_add \n"
 " addu %0, %2 \n"
 " sc %0, %1 \n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " ll %0, %1 # atomic_add \n"
 " addu %0, %2 \n"
 " sc %0, %1 \n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 int temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %0, %1 # atomic_sub \n"
 " subu %0, %2 \n"
 " sc %0, %1 \n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " ll %0, %1 # atomic_sub \n"
 " subu %0, %2 \n"
 " sc %0, %1 \n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 int temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %1, %2 # atomic_add_return \n"
 " addu %0, %1, %3 \n"
 " sc %0, %2 \n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " ll %1, %2 # atomic_add_return \n"
 " addu %0, %1, %3 \n"
 " sc %0, %2 \n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 int temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %1, %2 # atomic_sub_return \n"
 " subu %0, %1, %3 \n"
 " sc %0, %2 \n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " ll %1, %2 # atomic_sub_return \n"
 " subu %0, %1, %3 \n"
 " sc %0, %2 \n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 int temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %1, %2 # atomic_sub_if_positive\n"
 " subu %0, %1, %3 \n"
 " bltz %0, 1f \n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 int temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %1, %2 # atomic_sub_if_positive\n"
 " subu %0, %1, %3 \n"
 " bltz %0, 1f \n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: lld %0, %1 # atomic64_add \n"
 " daddu %0, %2 \n"
 " scd %0, %1 \n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " lld %0, %1 # atomic64_add \n"
 " daddu %0, %2 \n"
 " scd %0, %1 \n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: lld %0, %1 # atomic64_sub \n"
 " dsubu %0, %2 \n"
 " scd %0, %1 \n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " lld %0, %1 # atomic64_sub \n"
 " dsubu %0, %2 \n"
 " scd %0, %1 \n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: lld %1, %2 # atomic64_add_return \n"
 " daddu %0, %1, %3 \n"
 " scd %0, %2 \n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " lld %1, %2 # atomic64_add_return \n"
 " daddu %0, %1, %3 \n"
 " scd %0, %2 \n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: lld %1, %2 # atomic64_sub_return \n"
 " dsubu %0, %1, %3 \n"
 " scd %0, %2 \n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " lld %1, %2 # atomic64_sub_return \n"
 " dsubu %0, %1, %3 \n"
 " scd %0, %2 \n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: lld %1, %2 # atomic64_sub_if_positive\n"
 " dsubu %0, %1, %3 \n"
 " bltz %0, 1f \n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: lld %1, %2 # atomic64_sub_if_positive\n"
 " dsubu %0, %1, %3 \n"
 " bltz %0, 1f \n"

@@ -79,7 +79,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 if (kernel_uses_llsc && R10000_LLSC_WAR) {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: " __LL "%0, %1 # set_bit \n"
 " or %0, %2 \n"
 " " __SC "%0, %1 \n"
@@ -101,7 +101,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 } else if (kernel_uses_llsc) {
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " " __LL "%0, %1 # set_bit \n"
 " or %0, %2 \n"
 " " __SC "%0, %1 \n"
@@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 if (kernel_uses_llsc && R10000_LLSC_WAR) {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: " __LL "%0, %1 # clear_bit \n"
 " and %0, %2 \n"
 " " __SC "%0, %1 \n"
@@ -153,7 +153,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 } else if (kernel_uses_llsc) {
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " " __LL "%0, %1 # clear_bit \n"
 " and %0, %2 \n"
 " " __SC "%0, %1 \n"
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: " __LL "%0, %1 # change_bit \n"
 " xor %0, %2 \n"
 " " __SC "%0, %1 \n"
@@ -211,7 +211,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " " __LL "%0, %1 # change_bit \n"
 " xor %0, %2 \n"
 " " __SC "%0, %1 \n"
@@ -244,7 +244,7 @@ static inline int test_and_set_bit(unsigned long nr,
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: " __LL "%0, %1 # test_and_set_bit \n"
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
@@ -260,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " " __LL "%0, %1 # test_and_set_bit \n"
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
@@ -298,7 +298,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: " __LL "%0, %1 # test_and_set_bit \n"
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
@@ -314,7 +314,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " " __LL "%0, %1 # test_and_set_bit \n"
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
@@ -353,7 +353,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: " __LL "%0, %1 # test_and_clear_bit \n"
 " or %2, %0, %3 \n"
 " xor %2, %3 \n"
@@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " " __LL "%0, %1 # test_and_clear_bit \n"
 " or %2, %0, %3 \n"
 " xor %2, %3 \n"
@@ -427,7 +427,7 @@ static inline int test_and_change_bit(unsigned long nr,
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: " __LL "%0, %1 # test_and_change_bit \n"
 " xor %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
@@ -443,7 +443,7 @@ static inline int test_and_change_bit(unsigned long nr,
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " " __LL "%0, %1 # test_and_change_bit \n"
 " xor %2, %0, %3 \n"
 " " __SC "\t%2, %1 \n"

@@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 unsigned long dummy;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %0, %3 # xchg_u32 \n"
 " .set mips0 \n"
 " move %2, %z4 \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " sc %2, %1 \n"
 " beqzl %2, 1b \n"
 " .set mips0 \n"
@@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " ll %0, %3 # xchg_u32 \n"
 " .set mips0 \n"
 " move %2, %z4 \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " sc %2, %1 \n"
 " .set mips0 \n"
 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
@@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 unsigned long dummy;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: lld %0, %3 # xchg_u64 \n"
 " move %2, %z4 \n"
 " scd %2, %1 \n"
@@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 do {
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " lld %0, %3 # xchg_u64 \n"
 " move %2, %z4 \n"
 " scd %2, %1 \n"
@@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 __asm__ __volatile__( \
 " .set push \n" \
 " .set noat \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "1: " ld " %0, %2 # __cmpxchg_asm \n" \
 " bne %0, %z3, 2f \n" \
 " .set mips0 \n" \
 " move $1, %z4 \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 " " st " $1, %1 \n" \
 " beqzl $1, 1b \n" \
 "2: \n" \
@@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 __asm__ __volatile__( \
 " .set push \n" \
 " .set noat \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "1: " ld " %0, %2 # __cmpxchg_asm \n" \
 " bne %0, %z3, 2f \n" \
 " .set mips0 \n" \
 " move $1, %z4 \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 " " st " $1, %1 \n" \
 " beqz $1, 1b \n" \
 " .set pop \n" \

@@ -23,11 +23,11 @@
 __asm__ __volatile__( \
 " .set push \n" \
 " .set noat \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "1: ll %1, %4 # __futex_atomic_op \n" \
 " .set mips0 \n" \
 " " insn " \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "2: sc $1, %2 \n" \
 " beqzl $1, 1b \n" \
 __WEAK_LLSC_MB \
@@ -49,11 +49,11 @@
 __asm__ __volatile__( \
 " .set push \n" \
 " .set noat \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
 " .set mips0 \n" \
 " " insn " \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "2: "user_sc("$1", "%2")" \n" \
 " beqz $1, 1b \n" \
 __WEAK_LLSC_MB \
@@ -147,12 +147,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "# futex_atomic_cmpxchg_inatomic \n"
 " .set push \n"
 " .set noat \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %1, %3 \n"
 " bne %1, %z4, 3f \n"
 " .set mips0 \n"
 " move $1, %z5 \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "2: sc $1, %2 \n"
 " beqzl $1, 1b \n"
 __WEAK_LLSC_MB
@@ -174,12 +174,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "# futex_atomic_cmpxchg_inatomic \n"
 " .set push \n"
 " .set noat \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: "user_ll("%1", "%3")" \n"
 " bne %1, %z4, 3f \n"
 " .set mips0 \n"
 " move $1, %z5 \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "2: "user_sc("$1", "%2")" \n"
 " beqz $1, 1b \n"
 __WEAK_LLSC_MB

@@ -331,7 +331,7 @@ static inline void pfx##write##bwlq(type val, \
 if (irq) \
 local_irq_save(__flags); \
 __asm__ __volatile__( \
- ".set mips3" "\t\t# __writeq""\n\t" \
+ ".set arch=r4000" "\t\t# __writeq""\n\t" \
 "dsll32 %L0, %L0, 0" "\n\t" \
 "dsrl32 %L0, %L0, 0" "\n\t" \
 "dsll32 %M0, %M0, 0" "\n\t" \
@@ -361,7 +361,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
 if (irq) \
 local_irq_save(__flags); \
 __asm__ __volatile__( \
- ".set mips3" "\t\t# __readq" "\n\t" \
+ ".set arch=r4000" "\t\t# __readq" "\n\t" \
 "ld %L0, %1" "\n\t" \
 "dsra32 %M0, %L0, 0" "\n\t" \
 "sll %L0, %L0, 0" "\n\t" \

@@ -33,7 +33,7 @@ static __inline__ long local_add_return(long i, local_t * l)
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1:" __LL "%1, %2 # local_add_return \n"
 " addu %0, %1, %3 \n"
 __SC "%0, %2 \n"
@@ -47,7 +47,7 @@ static __inline__ long local_add_return(long i, local_t * l)
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1:" __LL "%1, %2 # local_add_return \n"
 " addu %0, %1, %3 \n"
 __SC "%0, %2 \n"
@@ -78,7 +78,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1:" __LL "%1, %2 # local_sub_return \n"
 " subu %0, %1, %3 \n"
 __SC "%0, %2 \n"
@@ -92,7 +92,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 unsigned long temp;
 __asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1:" __LL "%1, %2 # local_sub_return \n"
 " subu %0, %1, %3 \n"
 __SC "%0, %2 \n"

@@ -76,7 +76,7 @@ static inline void set_value_reg32(volatile u32 *const addr,
 __asm__ __volatile__(
 " .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %0, %1 # set_value_reg32 \n"
 " and %0, %2 \n"
 " or %0, %3 \n"
@@ -98,7 +98,7 @@ static inline void set_reg32(volatile u32 *const addr,
 __asm__ __volatile__(
 " .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %0, %1 # set_reg32 \n"
 " or %0, %2 \n"
 " sc %0, %1 \n"
@@ -119,7 +119,7 @@ static inline void clear_reg32(volatile u32 *const addr,
 __asm__ __volatile__(
 " .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %0, %1 # clear_reg32 \n"
 " and %0, %2 \n"
 " sc %0, %1 \n"
@@ -140,7 +140,7 @@ static inline void toggle_reg32(volatile u32 *const addr,
 __asm__ __volatile__(
 " .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 "1: ll %0, %1 # toggle_reg32 \n"
 " xor %0, %2 \n"
 " sc %0, %1 \n"
@@ -216,7 +216,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 #define custom_read_reg32(address, tmp) \
 __asm__ __volatile__( \
 " .set push \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "1: ll %0, %1 #custom_read_reg32 \n" \
 " .set pop \n" \
 : "=r" (tmp), "=m" (*address) \
@@ -225,7 +225,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 #define custom_write_reg32(address, tmp) \
 __asm__ __volatile__( \
 " .set push \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 " sc %0, %1 #custom_write_reg32 \n" \
 " "__beqz"%0, 1b \n" \
 " nop \n" \

@@ -36,7 +36,7 @@
 __asm__ __volatile__( \
 " .set push \n" \
 " .set noreorder \n" \
- " .set mips3\n\t \n" \
+ " .set arch=r4000 \n" \
 " cache %0, %1 \n" \
 " .set pop \n" \
 : \
@@ -204,7 +204,7 @@ static inline void flush_scache_line(unsigned long addr)
 __asm__ __volatile__( \
 " .set push \n" \
 " .set noreorder \n" \
- " .set mips3 \n" \
+ " .set arch=r4000 \n" \
 "1: cache %0, (%1) \n" \
 "2: .set pop \n" \
 " .section __ex_table,\"a\" \n" \

@@ -435,7 +435,7 @@
 .macro RESTORE_SP_AND_RET
 LONG_L sp, PT_R29(sp)
- .set mips3
+ .set arch=r4000
 eret
 .set mips0
 .endm

@@ -122,7 +122,7 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
 jr k0
 RESTORE_ALL
- .set mips3
+ .set arch=r4000
 eret
 /***********************************************************************

@@ -67,7 +67,7 @@ NESTED(except_vec3_generic, 0, sp)
 */
 NESTED(except_vec3_r4000, 0, sp)
 .set push
- .set mips3
+ .set arch=r4000
 .set noat
 mfc0 k1, CP0_CAUSE
 li k0, 31<<2
@@ -139,7 +139,7 @@ LEAF(__r4k_wait)
 nop
 nop
 #endif
- .set mips3
+ .set arch=r4000
 wait
 /* end of rollback region (the region size must be power of two) */
 1:
@@ -577,7 +577,7 @@ isrdhwr:
 ori k1, _THREAD_MASK
 xori k1, _THREAD_MASK
 LONG_L v1, TI_TP_VALUE(k1)
- .set mips3
+ .set arch=r4000
 eret
 .set mips0
 #endif

@@ -64,7 +64,7 @@ void r4k_wait_irqoff(void)
 if (!need_resched())
 __asm__(
 " .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " wait \n"
 " .set pop \n");
 local_irq_enable();
@@ -82,7 +82,7 @@ static void rm7k_wait_irqoff(void)
 if (!need_resched())
 __asm__(
 " .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " .set noat \n"
 " mfc0 $1, $12 \n"
 " sync \n"
@@ -103,7 +103,7 @@ static void au1k_wait(void)
 unsigned long c0status = read_c0_status() | 1; /* irqs on */
 __asm__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " cache 0x14, 0(%0) \n"
 " cache 0x14, 32(%0) \n"
 " sync \n"

@@ -31,7 +31,7 @@
 .endm
 .set noreorder
- .set mips3
+ .set arch=r4000
 LEAF(_save_fp_context)
 cfc1 t1, fcr31

@@ -294,7 +294,7 @@ LEAF(_init_fpu)
 1: .set pop
 #endif /* CONFIG_CPU_MIPS32_R2 */
 #else
- .set mips3
+ .set arch=r4000
 dmtc1 t1, $f0
 dmtc1 t1, $f2
 dmtc1 t1, $f4

@@ -110,7 +110,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 if (cpu_has_llsc && R10000_LLSC_WAR) {
 __asm__ __volatile__ (
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " li %[err], 0 \n"
 "1: ll %[old], (%[addr]) \n"
 " move %[tmp], %[new] \n"
@@ -135,7 +135,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 : "memory");
 } else if (cpu_has_llsc) {
 __asm__ __volatile__ (
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " li %[err], 0 \n"
 "1: ll %[old], (%[addr]) \n"
 " move %[tmp], %[new] \n"

@@ -49,7 +49,7 @@ void msp7120_reset(void)
 /* Cache the reset code of this function */
 __asm__ __volatile__ (
 " .set push \n"
- " .set mips3 \n"
+ " .set arch=r4000 \n"
 " la %0,startpoint \n"
 " la %1,endpoint \n"
 " .set pop \n"