Diffstat (limited to 'linux/src/include/asm-i386')
-rw-r--r--   linux/src/include/asm-i386/bitops.h    | 28
-rw-r--r--   linux/src/include/asm-i386/io.h        | 12
-rw-r--r--   linux/src/include/asm-i386/segment.h   |  8
-rw-r--r--   linux/src/include/asm-i386/semaphore.h | 30
4 files changed, 46 insertions, 32 deletions
diff --git a/linux/src/include/asm-i386/bitops.h b/linux/src/include/asm-i386/bitops.h
index fc4cf192..e2a4c14a 100644
--- a/linux/src/include/asm-i386/bitops.h
+++ b/linux/src/include/asm-i386/bitops.h
@@ -28,7 +28,7 @@ struct __dummy { unsigned long a[100]; };
 #define ADDR (*(struct __dummy *) addr)
 #define CONST_ADDR (*(const struct __dummy *) addr)
 
-extern __inline__ int set_bit(int nr, SMPVOL void * addr)
+static __inline__ int set_bit(int nr, SMPVOL void * addr)
 {
         int oldbit;
 
@@ -39,7 +39,7 @@ extern __inline__ int set_bit(int nr, SMPVOL void * addr)
         return oldbit;
 }
 
-extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
+static __inline__ int clear_bit(int nr, SMPVOL void * addr)
 {
         int oldbit;
 
@@ -50,7 +50,7 @@ extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
         return oldbit;
 }
 
-extern __inline__ int change_bit(int nr, SMPVOL void * addr)
+static __inline__ int change_bit(int nr, SMPVOL void * addr)
 {
         int oldbit;
 
@@ -61,7 +61,7 @@ extern __inline__ int change_bit(int nr, SMPVOL void * addr)
         return oldbit;
 }
 
-extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 {
         int oldbit;
 
@@ -72,7 +72,7 @@ extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
         return oldbit;
 }
 
-extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 {
         int oldbit;
 
@@ -83,7 +83,7 @@ extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
         return oldbit;
 }
 
-extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 {
         int oldbit;
 
@@ -98,7 +98,7 @@ extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
 /*
  * This routine doesn't need to be atomic.
  */
-extern __inline__ int test_bit(int nr, const SMPVOL void * addr)
+static __inline__ int test_bit(int nr, const SMPVOL void * addr)
 {
         return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
 }
@@ -106,8 +106,9 @@ extern __inline__ int test_bit(int nr, const SMPVOL void * addr)
 /*
  * Find-bit routines..
  */
-extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
+static __inline__ int find_first_zero_bit(void * addr, unsigned size)
 {
+        int d0, d1, d2;
         int res;
 
         if (!size)
@@ -123,13 +124,12 @@ extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
                 "1:\tsubl %%ebx,%%edi\n\t"
                 "shll $3,%%edi\n\t"
                 "addl %%edi,%%edx"
-                :"=d" (res)
-                :"c" ((size + 31) >> 5), "D" (addr), "b" (addr)
-                :"ax", "cx", "di");
+                :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+                :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
         return res;
 }
 
-extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
 {
         unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
         int set = 0, bit = offset & 31, res;
@@ -160,7 +160,7 @@ extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
  */
-extern __inline__ unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
 {
         __asm__("bsfl %1,%0"
                 :"=r" (word)
@@ -176,7 +176,7 @@ extern __inline__ unsigned long ffz(unsigned long word)
  * differs in spirit from the above ffz (man ffs).
  */
-extern __inline__ int ffs(int x)
+static __inline__ int ffs(int x)
 {
         int r;
diff --git a/linux/src/include/asm-i386/io.h b/linux/src/include/asm-i386/io.h
index f961f1d2..34cf105b 100644
--- a/linux/src/include/asm-i386/io.h
+++ b/linux/src/include/asm-i386/io.h
@@ -45,12 +45,12 @@
  * make the kernel segment mapped at 0, we need to do translation
  * on the i386 as well)
  */
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
         return (unsigned long) _kvtophys(address);
 }
 
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
         return (void *) phystokv(address);
 }
@@ -90,7 +90,7 @@ extern inline void * phys_to_virt(unsigned long address)
  */
 
 #define __OUT1(s,x) \
-extern inline void __out##s(unsigned x value, unsigned short port) {
+static inline void __out##s(unsigned x value, unsigned short port) {
 
 #define __OUT2(s,s1,s2) \
 __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
@@ -102,7 +102,7 @@ __OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
 __OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }
 
 #define __IN1(s) \
-extern inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v;
+static inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v;
 
 #define __IN2(s,s1,s2) \
 __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
@@ -114,12 +114,12 @@ __IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; retu
 __IN1(s##c_p) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }
 
 #define __INS(s) \
-extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
 { __asm__ __volatile__ ("cld ; rep ; ins" #s \
 : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
 
 #define __OUTS(s) \
-extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
 { __asm__ __volatile__ ("cld ; rep ; outs" #s \
 : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
diff --git a/linux/src/include/asm-i386/segment.h b/linux/src/include/asm-i386/segment.h
index 5f8af993..d23aa173 100644
--- a/linux/src/include/asm-i386/segment.h
+++ b/linux/src/include/asm-i386/segment.h
@@ -60,7 +60,11 @@ static inline void __attribute__((always_inline)) __put_user(unsigned long x, vo
                         :"ir" (x), "m" (*__sd(y)));
                 break;
         default:
+#ifdef __OPTIMIZE__
                 bad_user_access_length();
+#else
+                asm volatile("ud2");
+#endif
         }
 }
 
@@ -85,7 +89,11 @@ static inline unsigned long __attribute__((always_inline)) __get_user(const void
                         :"m" (*__const_sd(y)));
                 return result;
         default:
+#ifdef __OPTIMIZE__
                 return bad_user_access_length();
+#else
+                asm volatile("ud2");
+#endif
         }
 }
diff --git a/linux/src/include/asm-i386/semaphore.h b/linux/src/include/asm-i386/semaphore.h
index 1486d1c1..18e12c10 100644
--- a/linux/src/include/asm-i386/semaphore.h
+++ b/linux/src/include/asm-i386/semaphore.h
@@ -30,6 +30,10 @@ struct semaphore {
 #define MUTEX ((struct semaphore) { 1, 0, 0, NULL })
 #define MUTEX_LOCKED ((struct semaphore) { 0, 0, 0, NULL })
 
+/* Special register calling convention:
+ * eax contains return address
+ * ecx contains semaphore address
+ */
 asmlinkage void down_failed(void /* special register calling convention */);
 asmlinkage void up_wakeup(void /* special register calling convention */);
 
@@ -41,20 +45,21 @@ extern void __up(struct semaphore * sem);
  * "down_failed" is a special asm handler that calls the C
  * routine that actually waits. See arch/i386/lib/semaphore.S
  */
-extern inline void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
 {
+        int d0;
         __asm__ __volatile__(
                 "# atomic down operation\n\t"
                 "movl $1f,%%eax\n\t"
 #ifdef __SMP__
                 "lock ; "
 #endif
-                "decl 0(%0)\n\t"
+                "decl %1\n\t"
                 "js " SYMBOL_NAME_STR(down_failed) "\n"
                 "1:\n"
-                :/* no outputs */
+                :"=&a" (d0), "=m" (sem->count)
                 :"c" (sem)
-                :"ax","dx","memory");
+                :"memory");
 }
 
 /*
@@ -81,7 +86,7 @@ asmlinkage int down_failed_interruptible(void); /* params in registers */
  * process can be killed. The down_failed_interruptible routine
  * returns negative for signalled and zero for semaphore acquired.
  */
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
 {
         int ret ;
 
@@ -91,13 +96,13 @@ extern inline int down_interruptible(struct semaphore * sem)
 #ifdef __SMP__
                 "lock ; "
 #endif
-                "decl 0(%1)\n\t"
+                "decl %1\n\t"
                 "js " SYMBOL_NAME_STR(down_failed_interruptible) "\n\t"
                 "xorl %%eax,%%eax\n"
                 "2:\n"
-                :"=a" (ret)
+                :"=&a" (ret), "=m" (sem->count)
                 :"c" (sem)
-                :"ax","dx","memory");
+                :"memory");
         return(ret) ;
 }
 
@@ -108,20 +113,21 @@ extern inline int down_interruptible(struct semaphore * sem)
  * The default case (no contention) will result in NO
  * jumps for both down() and up().
  */
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
 {
+        int d0;
         __asm__ __volatile__(
                 "# atomic up operation\n\t"
                 "movl $1f,%%eax\n\t"
 #ifdef __SMP__
                 "lock ; "
 #endif
-                "incl 0(%0)\n\t"
+                "incl %1\n\t"
                 "jle " SYMBOL_NAME_STR(up_wakeup) "\n1:"
-                :/* no outputs */
+                :"=&a" (d0), "=m" (sem->count)
                 :"c" (sem)
-                :"ax", "dx", "memory");
+                :"memory");
 }
 
 #endif
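
Almost every hunk above is the same mechanical change: "extern __inline__" / "extern inline" becomes "static". Under the traditional GNU89 inline model these headers were written for, an extern inline definition never produces an out-of-line copy of the function, so any call the compiler declines to inline (typically in an unoptimized build) becomes a reference to a symbol that no object file provides, and the link fails. A minimal sketch of the failure mode and of the converted form, using illustrative names rather than functions from these headers:

/* demo_inline.h -- illustrative only, not part of the patched headers */

/* GNU89 semantics: an inline-only definition; no out-of-line body is
 * emitted.  If a call survives to link time (e.g. a -O0 build), the
 * link fails with an undefined reference to demo_twice. */
extern __inline__ int demo_twice(int x)
{
        return x << 1;
}

/* The converted form: any translation unit that still needs a real
 * function body gets its own local copy, so the link always succeeds. */
static __inline__ int demo_twice_static(int x)
{
        return x << 1;
}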
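
The semaphore and find_first_zero_bit hunks also rework the inline asm operands: registers the asm scratches are no longer hidden in the clobber list but tied to dummy early-clobber outputs (the new d0/d1/d2 variables), and the word being modified is named as an "=m" output instead of being reached through 0(%0) on an input register, so the compiler knows exactly which registers and which memory the statement touches. A rough sketch of the pattern, with illustrative names and not a drop-in replacement for the semaphore code:

/* demo_atomic.c -- illustrative only */
static inline void demo_atomic_dec(int *counter)
{
        int scratch;    /* dummy output standing in for the old "ax" clobber */

        __asm__ __volatile__(
                "movl $1f,%%eax\n\t"    /* %eax is scratched inside the asm */
                "decl %1\n"             /* the counter word is operand 1    */
                "1:"
                : "=&a" (scratch), "=m" (*counter)  /* early-clobber eax, memory output */
                : "c" (counter)                     /* pointer still passed in %ecx */
                : "memory");
}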
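
The segment.h hunks wrap bad_user_access_length() in an __OPTIMIZE__ guard. That call relies on dead-code elimination: for the usual compile-time-constant access sizes the optimizer removes the default: branch entirely, and the function is typically left undefined so that a surviving call shows up as a link error. Without optimization that elimination does not happen, so the unoptimized build now traps with ud2 instead of depending on it. A sketch of the idiom, again with illustrative names:

/* demo_user.h -- illustrative only */
extern void demo_bad_access_length(void);       /* deliberately left undefined */

static inline void demo_put_user(unsigned long x, void *ptr, unsigned size)
{
        switch (size) {
        case 1: *(unsigned char  *) ptr = x; break;
        case 2: *(unsigned short *) ptr = x; break;
        case 4: *(unsigned int   *) ptr = x; break;
        default:
#ifdef __OPTIMIZE__
                /* constant sizes: the optimizer drops this branch, so the
                 * undefined symbol never reaches the linker */
                demo_bad_access_length();
#else
                /* -O0: the branch survives, so trap at runtime instead of
                 * breaking the link */
                __asm__ __volatile__("ud2");
#endif
        }
}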