Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/acpi.h                      |  10
-rw-r--r--  include/asm-i386/alternative.h               | 129
-rw-r--r--  include/asm-i386/arch_hooks.h                |   3
-rw-r--r--  include/asm-i386/atomic.h                    |  36
-rw-r--r--  include/asm-i386/bitops.h                    |   7
-rw-r--r--  include/asm-i386/cache.h                     |   2
-rw-r--r--  include/asm-i386/cpufeature.h                |   1
-rw-r--r--  include/asm-i386/dmi.h                       |  11
-rw-r--r--  include/asm-i386/io.h                        |  12
-rw-r--r--  include/asm-i386/mach-default/do_timer.h     |   2
-rw-r--r--  include/asm-i386/mach-es7000/mach_mpparse.h  |  10
-rw-r--r--  include/asm-i386/mach-visws/do_timer.h       |   2
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h     |   2
-rw-r--r--  include/asm-i386/mpspec.h                    |   1
-rw-r--r--  include/asm-i386/mtrr.h                      |   1
-rw-r--r--  include/asm-i386/mutex.h                     |   6
-rw-r--r--  include/asm-i386/pgtable-2level.h            |   2
-rw-r--r--  include/asm-i386/pgtable-3level.h            |   2
-rw-r--r--  include/asm-i386/pgtable.h                   |   5
-rw-r--r--  include/asm-i386/poll.h                      |   1
-rw-r--r--  include/asm-i386/rwlock.h                    |  56
-rw-r--r--  include/asm-i386/semaphore.h                 |   8
-rw-r--r--  include/asm-i386/spinlock.h                  |  34
-rw-r--r--  include/asm-i386/system.h                    |  64
-rw-r--r--  include/asm-i386/uaccess.h                   |  12
-rw-r--r--  include/asm-i386/unistd.h                    |  36
26 files changed, 281 insertions(+), 174 deletions(-)
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 55059abf9c95..20f523954218 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -103,6 +103,12 @@ __acpi_release_global_lock (unsigned int *lock)
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
+#ifdef CONFIG_X86_IO_APIC
+extern void check_acpi_pci(void);
+#else
+static inline void check_acpi_pci(void) { }
+#endif
+
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
@@ -128,8 +134,6 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
extern int skip_ioapic_setup;
extern int acpi_skip_timer_override;
-extern void check_acpi_pci(void);
-
static inline void disable_ioapic_setup(void)
{
skip_ioapic_setup = 1;
@@ -142,8 +146,6 @@ static inline int ioapic_setup_disabled(void)
#else
static inline void disable_ioapic_setup(void) { }
-static inline void check_acpi_pci(void) { }
-
#endif
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
new file mode 100644
index 000000000000..e201decea0c9
--- /dev/null
+++ b/include/asm-i386/alternative.h
@@ -0,0 +1,129 @@
+#ifndef _I386_ALTERNATIVE_H
+#define _I386_ALTERNATIVE_H
+
+#ifdef __KERNEL__
+
+struct alt_instr {
+ u8 *instr; /* original instruction */
+ u8 *replacement;
+ u8 cpuid; /* cpuid bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction, <= instrlen */
+ u8 pad;
+};
+
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+struct module;
+extern void alternatives_smp_module_add(struct module *mod, char *name,
+ void *locks, void *locks_end,
+ void *text, void *text_end);
+extern void alternatives_smp_module_del(struct module *mod);
+extern void alternatives_smp_switch(int smp);
+
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length
+ * of newinstr; the remainder can be padded with nops as needed.
+ *
+ * For non-barrier-like inlines, please define new variants
+ * without volatile and the memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */\
+ ".previous" :: "i" (feature) : "memory")
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * It is best to use fixed-size constraints (like (%1) ... "r").
+ * If you use variable-sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst-case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */\
+ ".previous" :: "i" (feature), ##input)
+
+/*
+ * Alternative inline assembly for SMP.
+ *
+ * alternative_smp() takes two versions (SMP first, UP second) and is
+ * for more complex stuff such as spinlocks.
+ *
+ * The LOCK_PREFIX macro defined here replaces the LOCK and
+ * LOCK_PREFIX macros used everywhere in the source tree.
+ *
+ * SMP alternatives use the same data structures as the other
+ * alternatives and the X86_FEATURE_UP flag to indicate the case of a
+ * UP system running an SMP kernel. The existing apply_alternatives()
+ * works fine for patching an SMP kernel for UP.
+ *
+ * The SMP alternative tables can be kept after boot and contain both
+ * UP and SMP versions of the instructions to allow switching back to
+ * SMP at runtime when hot-plugging a new CPU, which is especially
+ * useful in virtualized environments.
+ *
+ * The very common lock prefix is handled as a special case in a
+ * separate table, which is a pure address list without replacement
+ * pointer and size information. That keeps the table sizes small.
+ */
+
+#ifdef CONFIG_SMP
+#define alternative_smp(smpinstr, upinstr, args...) \
+ asm volatile ("661:\n\t" smpinstr "\n662:\n" \
+ ".section .smp_altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte 0x68\n" /* X86_FEATURE_UP */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .smp_altinstr_replacement,\"awx\"\n" \
+ "663:\n\t" upinstr "\n" /* replacement */ \
+ "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
+ ".previous" : args)
+
+#define LOCK_PREFIX \
+ ".section .smp_locks,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661f\n" /* address */ \
+ ".previous\n" \
+ "661:\n\tlock; "
+
+#else /* ! CONFIG_SMP */
+#define alternative_smp(smpinstr, upinstr, args...) \
+ asm volatile (upinstr : args)
+#define LOCK_PREFIX ""
+#endif
+
+#endif /* _I386_ALTERNATIVE_H */
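A caller uses alternative() the way the i386 barrier macros do: the generic sequence comes first, and the boot-time patcher overwrites it when the CPU sets the feature bit. A minimal sketch under those assumptions (wmb_sketch is an illustrative name, not part of this patch):

	#include <asm/alternative.h>
	#include <asm/cpufeature.h>

	/* Generic i386 has no sfence, so a locked add to the stack acts
	 * as the write barrier.  On SSE-capable CPUs the patcher replaces
	 * the 6-byte locked addl with the 3-byte sfence at boot. */
	static inline void wmb_sketch(void)
	{
		alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM);
	}

Note the replacement is shorter than the original, satisfying the replacementlen <= instrlen rule recorded in struct alt_instr.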
diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h
index 28b96a6fb9fa..238cf4275b96 100644
--- a/include/asm-i386/arch_hooks.h
+++ b/include/asm-i386/arch_hooks.h
@@ -24,4 +24,7 @@ extern void trap_init_hook(void);
extern void time_init_hook(void);
extern void mca_nmi_hook(void);
+extern int setup_early_printk(char *);
+extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
+
#endif
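With the printf format attribute on early_printk(), GCC now type-checks the argument list at every call site. A hypothetical caller, assuming this header and <linux/init.h>:

	#include <linux/init.h>
	#include <asm/arch_hooks.h>

	static void __init report_lowmem_sketch(unsigned int mb)
	{
		/* format(printf,1,2) makes GCC verify %u against mb */
		early_printk("early: %u MB lowmem mapped\n", mb);
	}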
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..22d80ece95cb 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -10,12 +10,6 @@
* resource counting etc..
*/
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
@@ -52,7 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "addl %1,%0"
+ LOCK_PREFIX "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -67,7 +61,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "subl %1,%0"
+ LOCK_PREFIX "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -86,7 +80,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "subl %2,%0; sete %1"
+ LOCK_PREFIX "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -101,7 +95,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "incl %0"
+ LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -115,7 +109,7 @@ static __inline__ void atomic_inc(atomic_t *v)
static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "decl %0"
+ LOCK_PREFIX "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -133,7 +127,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "decl %0; sete %1"
+ LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -152,7 +146,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "incl %0; sete %1"
+ LOCK_PREFIX "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -172,7 +166,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "addl %2,%0; sets %1"
+ LOCK_PREFIX "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -195,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
/* Modern 486+ processor */
__i = i;
__asm__ __volatile__(
- LOCK "xaddl %0, %1;"
+ LOCK_PREFIX "xaddl %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
return i + __i;
@@ -231,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
+ } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
@@ -242,11 +242,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
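The atomic_add_unless() rewrite above keeps the same compare-and-swap loop but makes both exit paths explicit so likely()/unlikely() hints can be attached. Ignoring atomicity, the control flow matches this plain-C sketch (all names here are illustrative stand-ins, not kernel code):

	/* Stand-in for atomic_cmpxchg(): if *v == expected, store new_val;
	 * either way return the value *v held.  The real primitive does
	 * this in a single locked cmpxchg instruction. */
	static int cmpxchg_sketch(int *v, int expected, int new_val)
	{
		int cur = *v;
		if (cur == expected)
			*v = new_val;
		return cur;
	}

	/* Add a to *v unless *v == u; non-zero return means the add happened. */
	static int add_unless_sketch(int *v, int a, int u)
	{
		int c = *v;			/* atomic_read() */
		for (;;) {
			int old;
			if (c == u)		/* forbidden value: give up */
				return 0;
			old = cmpxchg_sketch(v, c, c + a);
			if (old == c)		/* no race: add succeeded */
				return 1;
			c = old;		/* lost a race: retry */
		}
	}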
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 88e6ca248cd7..7d20b95edb3b 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -7,6 +7,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
+#include <asm/alternative.h>
/*
* These have to be done with inline assembly: that way the bit-setting
@@ -16,12 +17,6 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
#define ADDR (*(volatile long *) addr)
/**
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 615911e5bd24..ca15c9c665cf 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,4 +10,6 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif
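__read_mostly places a variable in the .data.read_mostly section so that globals which are read constantly but written rarely do not share cache lines with write-hot data. A hypothetical use:

	/* Read on every timer tick, written only at boot: keeping it in
	 * .data.read_mostly avoids cache-line bouncing with hot writers. */
	static int ticks_per_jiffy_sketch __read_mostly = 100;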
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index c4ec2a4d8fdf..5c0b5876b931 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -70,6 +70,7 @@
#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP (3*32+ 9) /* SMP kernel running on UP */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/dmi.h b/include/asm-i386/dmi.h
new file mode 100644
index 000000000000..38d4eeb7fc7e
--- /dev/null
+++ b/include/asm-i386/dmi.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <asm/io.h>
+
+/* Use early IO mappings for DMI because it's initialized early */
+#define dmi_ioremap bt_ioremap
+#define dmi_iounmap bt_iounmap
+#define dmi_alloc alloc_bootmem
+
+#endif
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 03233c2ab820..79670bb4b0c7 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -219,23 +219,11 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
/*
* Again, i386 does not require mem IO specific function.
*/
#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
/**
* check_signature - find BIOS signatures
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 56211414fc95..6312c3e79814 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -18,7 +18,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
do_timer(regs);
#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
/*
* In the SMP case we use the local APIC timer interrupt to do the
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 4a0637a3e208..99f66be240be 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -30,7 +30,8 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
return 0;
}
-static inline int es7000_check_dsdt()
+#ifdef CONFIG_ACPI
+static inline int es7000_check_dsdt(void)
{
struct acpi_table_header *header = NULL;
if(!acpi_get_table_header_early(ACPI_DSDT, &header))
@@ -54,6 +55,11 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
return 0;
}
-
+#else
+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ return 0;
+}
+#endif
#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
index 92d638fc8b11..95568e6ca91c 100644
--- a/include/asm-i386/mach-visws/do_timer.h
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -11,7 +11,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
do_timer(regs);
#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
/*
* In the SMP case we use the local APIC timer interrupt to do the
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index ae510e5d0d78..eaf518098981 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -5,7 +5,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
do_timer(regs);
#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
voyager_timer_interrupt(regs);
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 64a0b8e6afeb..62113d3bfdc2 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -22,7 +22,6 @@ extern int mp_bus_id_to_type [MAX_MP_BUSSES];
extern int mp_irq_entries;
extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
extern int mpc_default_type;
-extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
extern unsigned long mp_lapic_addr;
extern int pic_mode;
extern int using_apic_timer;
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 5b6ceda68c5f..64cf937c7e33 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -25,6 +25,7 @@
#include <linux/config.h>
#include <linux/ioctl.h>
+#include <linux/errno.h>
#define MTRR_IOCTL_BASE 'M'
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 9b2199e829f3..05a538531229 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -9,6 +9,8 @@
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
+#include "asm/alternative.h"
+
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
@@ -27,7 +29,7 @@ do { \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
- LOCK " decl (%%eax) \n" \
+ LOCK_PREFIX " decl (%%eax) \n" \
" js 2f \n" \
"1: \n" \
\
@@ -83,7 +85,7 @@ do { \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
- LOCK " incl (%%eax) \n" \
+ LOCK_PREFIX " incl (%%eax) \n" \
" jle 2f \n" \
"1: \n" \
\
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 74ef721b534d..27bde973abc7 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -61,4 +61,6 @@ static inline int pte_exec_kernel(pte_t pte)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+void vmalloc_sync_all(void);
+
#endif /* _I386_PGTABLE_2LEVEL_H */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index f1a8b454920a..36a5aa63cbbf 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -152,4 +152,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define vmalloc_sync_all() ((void)0)
+
#endif /* _I386_PGTABLE_3LEVEL_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 088a945bf26b..ee056c41a9fb 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -219,13 +219,12 @@ extern unsigned long pg0[];
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT)
static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
-static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
/*
* The following only works if pte_present() is not true.
@@ -242,7 +241,7 @@ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h
index aecc80a15d36..2cd4929abd40 100644
--- a/include/asm-i386/poll.h
+++ b/include/asm-i386/poll.h
@@ -16,6 +16,7 @@
#define POLLWRBAND 0x0200
#define POLLMSG 0x0400
#define POLLREMOVE 0x1000
+#define POLLRDHUP 0x2000
struct pollfd {
int fd;
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index b57cc7afdf7e..94f00195d543 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -21,21 +21,23 @@
#define RW_LOCK_BIAS_STR "0x01000000"
#define __build_read_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $1,(%0)\n\t" \
- "jns 1f\n" \
- "call " helper "\n\t" \
- "1:\n" \
- ::"a" (rw) : "memory")
+ alternative_smp("lock; subl $1,(%0)\n\t" \
+ "jns 1f\n" \
+ "call " helper "\n\t" \
+ "1:\n", \
+ "subl $1,(%0)\n\t", \
+ :"a" (rw) : "memory")
#define __build_read_lock_const(rw, helper) \
- asm volatile(LOCK "subl $1,%0\n\t" \
- "jns 1f\n" \
- "pushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
- "call " helper "\n\t" \
- "popl %%eax\n\t" \
- "1:\n" \
- :"=m" (*(volatile int *)rw) : : "memory")
+ alternative_smp("lock; subl $1,%0\n\t" \
+ "jns 1f\n" \
+ "pushl %%eax\n\t" \
+ "leal %0,%%eax\n\t" \
+ "call " helper "\n\t" \
+ "popl %%eax\n\t" \
+ "1:\n", \
+ "subl $1,%0\n\t", \
+ "=m" (*(volatile int *)rw) : : "memory")
#define __build_read_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
@@ -45,21 +47,23 @@
} while (0)
#define __build_write_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
- "jz 1f\n" \
- "call " helper "\n\t" \
- "1:\n" \
- ::"a" (rw) : "memory")
+ alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+ "jz 1f\n" \
+ "call " helper "\n\t" \
+ "1:\n", \
+ "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t", \
+ :"a" (rw) : "memory")
#define __build_write_lock_const(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
- "jz 1f\n" \
- "pushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
- "call " helper "\n\t" \
- "popl %%eax\n\t" \
- "1:\n" \
- :"=m" (*(volatile int *)rw) : : "memory")
+ alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+ "jz 1f\n" \
+ "pushl %%eax\n\t" \
+ "leal %0,%%eax\n\t" \
+ "call " helper "\n\t" \
+ "popl %%eax\n\t" \
+ "1:\n", \
+ "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
+ "=m" (*(volatile int *)rw) : : "memory")
#define __build_write_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 6a42b2142fd6..f7a0f310c524 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -99,7 +99,7 @@ static inline void down(struct semaphore * sem)
might_sleep();
__asm__ __volatile__(
"# atomic down operation\n\t"
- LOCK "decl %0\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %0\n\t" /* --sem->count */
"js 2f\n"
"1:\n"
LOCK_SECTION_START("")
@@ -123,7 +123,7 @@ static inline int down_interruptible(struct semaphore * sem)
might_sleep();
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
- LOCK "decl %1\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
@@ -148,7 +148,7 @@ static inline int down_trylock(struct semaphore * sem)
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
- LOCK "decl %1\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
@@ -173,7 +173,7 @@ static inline void up(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic up operation\n\t"
- LOCK "incl %0\n\t" /* ++sem->count */
+ LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
"jle 2f\n"
"1:\n"
LOCK_SECTION_START("")
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 23604350cdf4..d76b7693cf1d 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -35,31 +35,41 @@
#define __raw_spin_lock_string_flags \
"\n1:\t" \
"lock ; decb %0\n\t" \
- "jns 4f\n\t" \
+ "jns 5f\n" \
"2:\t" \
"testl $0x200, %1\n\t" \
- "jz 3f\n\t" \
- "sti\n\t" \
+ "jz 4f\n\t" \
+ "sti\n" \
"3:\t" \
"rep;nop\n\t" \
"cmpb $0, %0\n\t" \
"jle 3b\n\t" \
"cli\n\t" \
"jmp 1b\n" \
- "4:\n\t"
+ "4:\t" \
+ "rep;nop\n\t" \
+ "cmpb $0, %0\n\t" \
+ "jg 1b\n\t" \
+ "jmp 4b\n" \
+ "5:\n\t"
+
+#define __raw_spin_lock_string_up \
+ "\n\tdecb %0"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(
- __raw_spin_lock_string
- :"=m" (lock->slock) : : "memory");
+ alternative_smp(
+ __raw_spin_lock_string,
+ __raw_spin_lock_string_up,
+ "=m" (lock->slock) : : "memory");
}
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
- __asm__ __volatile__(
- __raw_spin_lock_string_flags
- :"=m" (lock->slock) : "r" (flags) : "memory");
+ alternative_smp(
+ __raw_spin_lock_string_flags,
+ __raw_spin_lock_string_up,
+ "=m" (lock->slock) : "r" (flags) : "memory");
}
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
@@ -178,12 +188,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+ asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
: "=m" (rw->lock) : : "memory");
}
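With LOCK_PREFIX centralized in <asm/alternative.h>, any new locked operation picks up the UP patching for free by following the same pattern as the atomic.h and spinlock.h conversions above. A sketch of a hypothetical atomic-OR helper:

	static inline void atomic_or_sketch(int mask, atomic_t *v)
	{
		__asm__ __volatile__(
			LOCK_PREFIX "orl %1,%0"	/* lock byte patched out on UP */
			: "=m" (v->counter)
			: "ir" (mask), "m" (v->counter));
	}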
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 399145a247f2..19cc79c9a35d 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -352,67 +352,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
#endif
-#ifdef __KERNEL__
-struct alt_instr {
- __u8 *instr; /* original instruction */
- __u8 *replacement;
- __u8 cpuid; /* cpuid bit set for replacement */
- __u8 instrlen; /* length of original instruction */
- __u8 replacementlen; /* length of new instruction, <= instrlen */
- __u8 pad;
-};
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 4\n" \
- " .long 661b\n" /* label */ \
- " .long 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Pecularities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement maake sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 4\n" \
- " .long 661b\n" /* label */ \
- " .long 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature), ##input)
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
@@ -558,5 +497,8 @@ static inline void sched_cacheflush(void)
}
extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
#endif
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 3f1337c34208..371457b1ceb6 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -197,13 +197,15 @@ extern void __put_user_8(void);
#define put_user(x,ptr) \
({ int __ret_pu; \
+ __typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
+ __pu_val = x; \
switch(sizeof(*(ptr))) { \
- case 1: __put_user_1(x, ptr); break; \
- case 2: __put_user_2(x, ptr); break; \
- case 4: __put_user_4(x, ptr); break; \
- case 8: __put_user_8(x, ptr); break; \
- default:__put_user_X(x, ptr); break; \
+ case 1: __put_user_1(__pu_val, ptr); break; \
+ case 2: __put_user_2(__pu_val, ptr); break; \
+ case 4: __put_user_4(__pu_val, ptr); break; \
+ case 8: __put_user_8(__pu_val, ptr); break; \
+ default:__put_user_X(__pu_val, ptr); break; \
} \
__ret_pu; \
})
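The __pu_val temporary guarantees the value expression is evaluated exactly once and is converted to the pointee's type before the size switch, just as a plain assignment would convert it. The same hygiene pattern in userspace terms (names hypothetical):

	#include <stdio.h>

	/* The temporary evaluates x once and applies the usual assignment
	 * conversion to the destination type up front. */
	#define STORE_ONCE(x, ptr) ({			\
		__typeof__(*(ptr)) __val = (x);		\
		*(ptr) = __val;				\
		0; })

	int main(void)
	{
		char c;
		long big = 0x1234;
		STORE_ONCE(big, &c);	/* converted to char: 0x34 */
		printf("%d\n", (int)c);
		return 0;
	}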
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index dc81a55dd94d..d8afd0e3b81a 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -347,9 +347,9 @@ __syscall_return(type,__res); \
type name(type1 arg1) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \
+ : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -357,9 +357,10 @@ __syscall_return(type,__res); \
type name(type1 arg1,type2 arg2) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \
+ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
+ : "memory"); \
__syscall_return(type,__res); \
}
@@ -367,9 +368,9 @@ __syscall_return(type,__res); \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
"d" ((long)(arg3)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -378,9 +379,9 @@ __syscall_return(type,__res); \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
"d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -390,10 +391,12 @@ __syscall_return(type,__res); \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
+ "int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \
+ : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
+ : "memory"); \
__syscall_return(type,__res); \
}
@@ -402,11 +405,14 @@ __syscall_return(type,__res); \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
{ \
long __res; \
-__asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \
+ struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
+__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
+ "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
+ "pop %%ebx ; pop %%ebp" \
: "=a" (__res) \
- : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \
- "0" ((long)(arg6)) : "memory"); \
+ : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
+ : "memory"); \
__syscall_return(type,__res); \
}
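All of these _syscallN changes exist because %ebx is not freely usable: under -fPIC it holds the GOT pointer, so the stubs now save it, load the first argument through an "ri" operand, and restore it around int $0x80 (the six-argument variant does the same dance for %ebp, passing args 1 and 6 through a small struct in memory). Call sites are unchanged; a classic usage sketch, assuming this header:

	#include <errno.h>		/* __syscall_return() sets errno */
	#include <asm/unistd.h>

	/* Expands to: int close(int fd) { ... int $0x80 ... }, shadowing
	 * the libc wrapper the way old kernel test programs did.  With the
	 * patched _syscall1 this also builds with -fPIC, since %ebx is
	 * saved and restored around the trap. */
	_syscall1(int, close, int, fd)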