author     Steve French    2007-10-20 06:26:44 +0200
committer  Steve French    2007-10-20 06:26:44 +0200
commit     748c5151deb56e4b7b5a9b07a884243764933831 (patch)
tree       e9dd45b8806d00b56503cfa55c46f3ebce0d151c /arch
parent     [CIFS] ACL support part 5 (diff)
parent     Revert "kconfig: tristate choices with mixed tristate and boolean values" (diff)
download   kernel-qcow2-linux-748c5151deb56e4b7b5a9b07a884243764933831.tar.gz
           kernel-qcow2-linux-748c5151deb56e4b7b5a9b07a884243764933831.tar.xz
           kernel-qcow2-linux-748c5151deb56e4b7b5a9b07a884243764933831.zip
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/err_marvel.c | 2
-rw-r--r--  arch/alpha/kernel/err_titan.c | 2
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 2
-rw-r--r--  arch/alpha/kernel/smp.c | 1
-rw-r--r--  arch/alpha/kernel/sys_alcor.c | 2
-rw-r--r--  arch/alpha/kernel/sys_sio.c | 2
-rw-r--r--  arch/alpha/lib/checksum.c | 2
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c | 2
-rw-r--r--  arch/alpha/mm/init.c | 2
-rw-r--r--  arch/alpha/oprofile/op_impl.h | 2
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/mach-at91/gpio.c | 2
-rw-r--r--  arch/arm/mach-omap1/irq.c | 2
-rw-r--r--  arch/arm/mach-omap2/timer-gp.c | 2
-rw-r--r--  arch/arm/mach-s3c2410/clock.c | 2
-rw-r--r--  arch/arm/mach-s3c2412/clock.c | 2
-rw-r--r--  arch/arm/mach-s3c2443/clock.c | 2
-rw-r--r--  arch/arm/nwfpe/fpopcode.h | 34
-rw-r--r--  arch/arm/plat-omap/dma.c | 2
-rw-r--r--  arch/arm/plat-omap/gpio.c | 2
-rw-r--r--  arch/avr32/Kconfig | 2
-rw-r--r--  arch/blackfin/Kconfig | 71
-rw-r--r--  arch/cris/arch-v10/Kconfig | 2
-rw-r--r--  arch/cris/arch-v10/boot/compressed/misc.c | 2
-rw-r--r--  arch/cris/arch-v10/drivers/pcf8563.c | 2
-rw-r--r--  arch/cris/arch-v10/kernel/debugport.c | 2
-rw-r--r--  arch/cris/arch-v10/kernel/entry.S | 2
-rw-r--r--  arch/cris/arch-v10/kernel/fasttimer.c | 2
-rw-r--r--  arch/cris/arch-v10/kernel/irq.c | 2
-rw-r--r--  arch/cris/arch-v10/kernel/kgdb.c | 2
-rw-r--r--  arch/cris/arch-v10/kernel/process.c | 2
-rw-r--r--  arch/cris/arch-v10/kernel/shadows.c | 2
-rw-r--r--  arch/cris/arch-v10/lib/dram_init.S | 4
-rw-r--r--  arch/cris/arch-v10/lib/string.c | 2
-rw-r--r--  arch/cris/arch-v10/lib/usercopy.c | 6
-rw-r--r--  arch/cris/arch-v32/boot/compressed/misc.c | 6
-rw-r--r--  arch/cris/arch-v32/drivers/axisflashmap.c | 2
-rw-r--r--  arch/cris/arch-v32/drivers/i2c.c | 2
-rw-r--r--  arch/cris/arch-v32/drivers/nandflash.c | 2
-rw-r--r--  arch/cris/arch-v32/drivers/pcf8563.c | 2
-rw-r--r--  arch/cris/arch-v32/kernel/fasttimer.c | 2
-rw-r--r--  arch/cris/arch-v32/kernel/irq.c | 8
-rw-r--r--  arch/cris/arch-v32/kernel/process.c | 2
-rw-r--r--  arch/cris/arch-v32/kernel/signal.c | 4
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c | 2
-rw-r--r--  arch/cris/arch-v32/kernel/time.c | 2
-rw-r--r--  arch/cris/arch-v32/kernel/traps.c | 2
-rw-r--r--  arch/cris/arch-v32/lib/dram_init.S | 2
-rw-r--r--  arch/cris/arch-v32/lib/string.c | 2
-rw-r--r--  arch/cris/arch-v32/lib/usercopy.c | 6
-rw-r--r--  arch/cris/arch-v32/mm/tlb.c | 6
-rw-r--r--  arch/cris/kernel/irq.c | 8
-rw-r--r--  arch/cris/mm/fault.c | 4
-rw-r--r--  arch/cris/mm/init.c | 2
-rw-r--r--  arch/cris/mm/tlb.c | 2
-rw-r--r--  arch/frv/kernel/irq.c | 2
-rw-r--r--  arch/frv/kernel/semaphore.c | 2
-rw-r--r--  arch/frv/kernel/time.c | 4
-rw-r--r--  arch/h8300/Kconfig.debug | 6
-rw-r--r--  arch/h8300/kernel/irq.c | 2
-rw-r--r--  arch/h8300/kernel/time.c | 2
-rw-r--r--  arch/h8300/kernel/traps.c | 2
-rw-r--r--  arch/h8300/platform/h8s/ints.c | 2
-rw-r--r--  arch/h8300/platform/h8s/ints_h8s.c | 2
-rw-r--r--  arch/i386/Kconfig | 6
-rw-r--r--  arch/i386/Makefile | 6
-rw-r--r--  arch/ia64/Kconfig | 8
-rw-r--r--  arch/m32r/kernel/irq.c | 2
-rw-r--r--  arch/m32r/kernel/signal.c | 2
-rw-r--r--  arch/m32r/kernel/smp.c | 18
-rw-r--r--  arch/m32r/kernel/smpboot.c | 6
-rw-r--r--  arch/m32r/kernel/sys_m32r.c | 2
-rw-r--r--  arch/m68k/amiga/pcmcia.c | 2
-rw-r--r--  arch/m68k/ifpsp060/CHANGES | 2
-rw-r--r--  arch/m68k/ifpsp060/MISC | 2
-rw-r--r--  arch/m68k/ifpsp060/README | 2
-rw-r--r--  arch/m68k/ifpsp060/TEST.DOC | 2
-rw-r--r--  arch/m68k/ifpsp060/fplsp.doc | 2
-rw-r--r--  arch/m68k/ifpsp060/fpsp.doc | 2
-rw-r--r--  arch/m68k/ifpsp060/fskeleton.S | 2
-rw-r--r--  arch/m68k/ifpsp060/ilsp.doc | 2
-rw-r--r--  arch/m68k/ifpsp060/iskeleton.S | 2
-rw-r--r--  arch/m68k/ifpsp060/isp.doc | 2
-rw-r--r--  arch/m68k/ifpsp060/os.S | 2
-rw-r--r--  arch/m68k/ifpsp060/src/fplsp.S | 2
-rw-r--r--  arch/m68k/ifpsp060/src/fpsp.S | 2
-rw-r--r--  arch/m68k/ifpsp060/src/ftest.S | 2
-rw-r--r--  arch/m68k/ifpsp060/src/ilsp.S | 2
-rw-r--r--  arch/m68k/ifpsp060/src/isp.S | 2
-rw-r--r--  arch/m68k/ifpsp060/src/itest.S | 2
-rw-r--r--  arch/m68k/ifpsp060/src/pfpsp.S | 2
-rw-r--r--  arch/m68k/mac/config.c | 4
-rw-r--r--  arch/m68k/mac/iop.c | 4
-rw-r--r--  arch/m68k/mac/oss.c | 2
-rw-r--r--  arch/m68k/mac/via.c | 2
-rw-r--r--  arch/m68k/math-emu/fp_log.c | 2
-rw-r--r--  arch/m68k/q40/q40ints.c | 4
-rw-r--r--  arch/m68k/sun3/mmu_emu.c | 2
-rw-r--r--  arch/m68k/tools/amiga/dmesg.c | 2
-rw-r--r--  arch/m68knommu/platform/5307/pit.c | 2
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/kernel/module.c | 2
-rw-r--r--  arch/mips/pci/pci-excite.c | 2
-rw-r--r--  arch/mips/sni/pcimt.c | 2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S | 2
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/l2cr_6xx.S | 2
-rw-r--r--  arch/powerpc/platforms/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 2
-rw-r--r--  arch/powerpc/platforms/celleb/scc_uhc.c | 2
-rw-r--r--  arch/sh64/kernel/pci_sh5.c | 2
-rw-r--r--  arch/um/Kconfig | 2
-rw-r--r--  arch/um/drivers/line.c | 2
-rw-r--r--  arch/um/drivers/null.c | 2
-rw-r--r--  arch/um/drivers/stderr_console.c | 2
-rw-r--r--  arch/um/kernel/gmon_syms.c | 4
-rw-r--r--  arch/um/kernel/irq.c | 2
-rw-r--r--  arch/um/kernel/ptrace.c | 2
-rw-r--r--  arch/um/sys-i386/bug.c | 2
-rw-r--r--  arch/um/sys-i386/tls.c | 2
-rw-r--r--  arch/um/sys-x86_64/bug.c | 2
-rw-r--r--  arch/v850/kernel/me2.c | 4
-rw-r--r--  arch/v850/kernel/rte_mb_a_pci.c | 4
-rw-r--r--  arch/x86/boot/compressed/misc_32.c | 4
-rw-r--r--  arch/x86/boot/compressed/misc_64.c | 4
-rw-r--r--  arch/x86/ia32/ia32_binfmt.c | 124
-rw-r--r--  arch/x86/kernel/Makefile_32 | 3
-rw-r--r--  arch/x86/kernel/Makefile_64 | 4
-rw-r--r--  arch/x86/kernel/acpi/Makefile_32 | 3
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 10
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 4
-rw-r--r--  arch/x86/kernel/acpi/earlyquirk_32.c | 84
-rw-r--r--  arch/x86/kernel/acpi/processor.c | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 6
-rw-r--r--  arch/x86/kernel/apic_32.c | 4
-rw-r--r--  arch/x86/kernel/apm_32.c | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 4
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig_32 (renamed from arch/x86/kernel/cpu/cpufreq/Kconfig) | 0
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig_64 (renamed from arch/x86/kernel/cpufreq/Kconfig) | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/gx-suspmod.c | 8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k6.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/sc520_freq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 10
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 12
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cyrix.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 16
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 11
-rw-r--r--  arch/x86/kernel/cpuid.c | 34
-rw-r--r--  arch/x86/kernel/e820_32.c | 2
-rw-r--r--  arch/x86/kernel/early-quirks.c (renamed from arch/x86/kernel/early-quirks_64.c) | 19
-rw-r--r--  arch/x86/kernel/genapic_64.c | 15
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 2
-rw-r--r--  arch/x86/kernel/hpet.c | 5
-rw-r--r--  arch/x86/kernel/i8253.c | 2
-rw-r--r--  arch/x86/kernel/i8259_32.c | 3
-rw-r--r--  arch/x86/kernel/init_task.c (renamed from arch/x86/kernel/init_task_32.c) | 11
-rw-r--r--  arch/x86/kernel/init_task_64.c | 54
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 17
-rw-r--r--  arch/x86/kernel/io_apic_64.c | 4
-rw-r--r--  arch/x86/kernel/mce_64.c | 7
-rw-r--r--  arch/x86/kernel/mce_amd_64.c | 4
-rw-r--r--  arch/x86/kernel/microcode.c | 6
-rw-r--r--  arch/x86/kernel/mpparse_32.c | 2
-rw-r--r--  arch/x86/kernel/mpparse_64.c | 17
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/pci-dma_64.c | 2
-rw-r--r--  arch/x86/kernel/process_32.c | 56
-rw-r--r--  arch/x86/kernel/ptrace_32.c | 2
-rw-r--r--  arch/x86/kernel/quirks.c | 112
-rw-r--r--  arch/x86/kernel/reboot_64.c | 3
-rw-r--r--  arch/x86/kernel/reboot_fixups_32.c | 8
-rw-r--r--  arch/x86/kernel/setup64.c | 10
-rw-r--r--  arch/x86/kernel/setup_32.c | 6
-rw-r--r--  arch/x86/kernel/setup_64.c | 25
-rw-r--r--  arch/x86/kernel/signal_32.c | 2
-rw-r--r--  arch/x86/kernel/signal_64.c | 2
-rw-r--r--  arch/x86/kernel/smp_32.c | 8
-rw-r--r--  arch/x86/kernel/smp_64.c | 119
-rw-r--r--  arch/x86/kernel/smpboot_32.c | 83
-rw-r--r--  arch/x86/kernel/smpboot_64.c | 76
-rw-r--r--  arch/x86/kernel/summit_32.c | 2
-rw-r--r--  arch/x86/kernel/suspend_64.c | 13
-rw-r--r--  arch/x86/kernel/traps_32.c | 54
-rw-r--r--  arch/x86/kernel/traps_64.c | 18
-rw-r--r--  arch/x86/kernel/tsc_32.c | 12
-rw-r--r--  arch/x86/kernel/tsc_64.c | 4
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 6
-rw-r--r--  arch/x86/lib/delay_32.c | 2
-rw-r--r--  arch/x86/lib/delay_64.c | 3
-rw-r--r--  arch/x86/mach-default/setup.c | 4
-rw-r--r--  arch/x86/mach-generic/default.c | 2
-rw-r--r--  arch/x86/mach-generic/probe.c | 2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 24
-rw-r--r--  arch/x86/mach-voyager/voyager_thread.c | 2
-rw-r--r--  arch/x86/mm/boot_ioremap_32.c | 2
-rw-r--r--  arch/x86/mm/discontig_32.c | 2
-rw-r--r--  arch/x86/mm/fault_32.c | 7
-rw-r--r--  arch/x86/mm/fault_64.c | 20
-rw-r--r--  arch/x86/mm/numa_64.c | 2
-rw-r--r--  arch/x86/mm/srat_64.c | 2
-rw-r--r--  arch/x86/oprofile/backtrace.c | 110
-rw-r--r--  arch/x86/oprofile/op_x86_model.h | 2
-rw-r--r--  arch/x86/pci/irq.c | 4
-rw-r--r--  arch/x86_64/.gitignore | 1
-rw-r--r--  arch/x86_64/Kconfig | 8
-rw-r--r--  arch/x86_64/Makefile | 6
-rw-r--r--  arch/xtensa/platform-iss/network.c | 2
224 files changed, 921 insertions, 926 deletions
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
index f2956ac8dccc..497877bf2012 100644
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -1082,7 +1082,7 @@ marvel_machine_check(u64 vector, u64 la_ptr)
}
/*
- * A system event or error has occured, handle it here.
+ * A system event or error has occurred, handle it here.
*
* Any errors in the logout frame have already been cleared by the
* PALcode, so just parse it.
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index 543d96d7fa2b..6f3867877d9e 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -591,7 +591,7 @@ privateer_process_680_frame(struct el_common *mchk_header, int print)
(struct el_PRIVATEER_envdata_mcheck *)
((unsigned long)mchk_header + mchk_header->sys_offset);
- /* TODO - catagorize errors, for now, no error */
+ /* TODO - categorize errors, for now, no error */
if (!print)
return status;
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ce857158c1ea..6413c5f23226 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -715,7 +715,7 @@ osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
/*
* Alpha Architecture Handbook 4.7.7.3:
* To be fully IEEE compiant, we must track the current IEEE
- * exception state in software, because spurrious bits can be
+ * exception state in software, because spurious bits can be
* set in the trap shadow of a software-complete insn.
*/
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index ad176441be55..f4ab233201b2 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -439,7 +439,6 @@ setup_smp(void)
((char *)cpubase + i*hwrpb->processor_size);
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
- /* Assume here that "whami" == index */
cpu_set(i, cpu_present_map);
cpu->pal_revision = boot_cpu_palrev;
}
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 49bedfbbd31b..d187d01d2a17 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -138,7 +138,7 @@ alcor_init_irq(void)
for (i = 16; i < 48; ++i) {
/* On Alcor, at least, lines 20..30 are not connected
- and can generate spurrious interrupts if we turn them
+ and can generate spurious interrupts if we turn them
on while IRQ probing. */
if (i >= 16+20 && i <= 16+30)
continue;
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 14b5a753aba5..ee7b9009ebb4 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -78,7 +78,7 @@ alphabook1_init_arch(void)
* example, sound boards seem to like using IRQ 9.
*
* This is NOT how we should do it. PIRQ0-X should have
- * their own IRQ's, the way intel uses the IO-APIC irq's.
+ * their own IRQs, the way intel uses the IO-APIC IRQs.
*/
static void __init
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 8698e0746f9f..199f6efa83fa 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -5,7 +5,7 @@
* in an architecture-specific manner due to speed..
* Comments in other versions indicate that the algorithms are from RFC1071
*
- * accellerated versions (and 21264 assembly versions ) contributed by
+ * accelerated versions (and 21264 assembly versions ) contributed by
* Rick Gorton <rick.gorton@alpha-processor.com>
*/
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 4ca75c74ce90..40736da9bea8 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -2,7 +2,7 @@
* csum_partial_copy - do IP checksumming and copy
*
* (C) Copyright 1996 Linus Torvalds
- * accellerated versions (and 21264 assembly versions ) contributed by
+ * accelerated versions (and 21264 assembly versions ) contributed by
* Rick Gorton <rick.gorton@alpha-processor.com>
*
* Don't look at this too closely - you'll go mad. The things
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 5e6da47779a4..40c15e7301de 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -235,7 +235,7 @@ callback_init(void * kernel_end)
unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
crb->map[i].va = vaddr;
for (j = 0; j < crb->map[i].count; ++j) {
- /* Newer console's (especially on larger
+ /* Newer consoles (especially on larger
systems) may require more pages of
PTEs. Grab additional pages as needed. */
if (pmd != pmd_offset(pgd, vaddr)) {
diff --git a/arch/alpha/oprofile/op_impl.h b/arch/alpha/oprofile/op_impl.h
index 6b97893c1a80..b2b87ae9a353 100644
--- a/arch/alpha/oprofile/op_impl.h
+++ b/arch/alpha/oprofile/op_impl.h
@@ -38,7 +38,7 @@ struct op_register_config {
unsigned long need_reset;
};
-/* Per-architecture configury and hooks. */
+/* Per-architecture configuration and hooks. */
struct op_axp_model {
void (*reg_setup) (struct op_register_config *,
struct op_counter_config *,
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4cee938df01e..a0cdaafa115b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -851,7 +851,7 @@ config KEXEC
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
- but it is indepedent of the system firmware. And like a reboot
+ but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
It is an ongoing process to be certain the hardware in a machine
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index ba4a1bb3ee40..aa2d365c93fb 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -439,7 +439,7 @@ void __init at91_gpio_irq_setup(void)
for (i = 0; i < 32; i++, pin++) {
/*
* Can use the "simple" and not "edge" handler since it's
- * shorter, and the AIC handles interupts sanely.
+ * shorter, and the AIC handles interrupts sanely.
*/
set_irq_chip(pin, &gpio_irqchip);
set_irq_handler(pin, handle_simple_irq);
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 0733078940fa..1da9d59a0347 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2004 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
- * Major cleanups by Juha Yrjölä <juha.yrjola@nokia.com>
+ * Major cleanups by Juha Yrjölä <juha.yrjola@nokia.com>
*
* Completely re-written to support various OMAP chips with bank specific
* interrupt handlers.
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 62e801ef9ad9..8d322c20ccae 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005 Nokia Corporation
* Author: Paul Mundt <paul.mundt@nokia.com>
- * Juha Yrjölä <juha.yrjola@nokia.com>
+ * Juha Yrjölä <juha.yrjola@nokia.com>
* OMAP Dual-mode timer framework support by Timo Teras
*
* Some parts based off of TI's 24xx code:
diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c
index cab9d6265e9e..2bfaa6102025 100644
--- a/arch/arm/mach-s3c2410/clock.c
+++ b/arch/arm/mach-s3c2410/clock.c
@@ -238,7 +238,7 @@ int __init s3c2410_baseclk_add(void)
}
/* We must be careful disabling the clocks we are not intending to
- * be using at boot time, as subsytems such as the LCD which do
+ * be using at boot time, as subsystems such as the LCD which do
* their own DMA requests to the bus can cause the system to lockup
* if they where in the middle of requesting bus access.
*
diff --git a/arch/arm/mach-s3c2412/clock.c b/arch/arm/mach-s3c2412/clock.c
index 8543dd6df391..458993601897 100644
--- a/arch/arm/mach-s3c2412/clock.c
+++ b/arch/arm/mach-s3c2412/clock.c
@@ -689,7 +689,7 @@ int __init s3c2412_baseclk_add(void)
}
/* We must be careful disabling the clocks we are not intending to
- * be using at boot time, as subsytems such as the LCD which do
+ * be using at boot time, as subsystems such as the LCD which do
* their own DMA requests to the bus can cause the system to lockup
* if they where in the middle of requesting bus access.
*
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 58402948c47c..b42f956738d0 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -1005,7 +1005,7 @@ void __init s3c2443_init_clocks(int xtal)
}
/* We must be careful disabling the clocks we are not intending to
- * be using at boot time, as subsytems such as the LCD which do
+ * be using at boot time, as subsystems such as the LCD which do
* their own DMA requests to the bus can cause the system to lockup
* if they where in the middle of requesting bus access.
*
diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h
index 0090b19bbe61..786e4c96156d 100644
--- a/arch/arm/nwfpe/fpopcode.h
+++ b/arch/arm/nwfpe/fpopcode.h
@@ -78,11 +78,11 @@ TABLE 1
+-------------------------+---+---+---------+---------+
| Precision | u | v | FPSR.EP | length |
+-------------------------+---+---+---------+---------+
-| Single | 0 ü 0 | x | 1 words |
-| Double | 1 ü 1 | x | 2 words |
-| Extended | 1 ü 1 | x | 3 words |
-| Packed decimal | 1 ü 1 | 0 | 3 words |
-| Expanded packed decimal | 1 ü 1 | 1 | 4 words |
+| Single | 0 | 0 | x | 1 words |
+| Double | 1 | 1 | x | 2 words |
+| Extended | 1 | 1 | x | 3 words |
+| Packed decimal | 1 | 1 | 0 | 3 words |
+| Expanded packed decimal | 1 | 1 | 1 | 4 words |
+-------------------------+---+---+---------+---------+
Note: x = don't care
*/
@@ -92,10 +92,10 @@ TABLE 2
+---+---+---------------------------------+
| w | x | Number of registers to transfer |
+---+---+---------------------------------+
-| 0 ü 1 | 1 |
-| 1 ü 0 | 2 |
-| 1 ü 1 | 3 |
-| 0 ü 0 | 4 |
+| 0 | 1 | 1 |
+| 1 | 0 | 2 |
+| 1 | 1 | 3 |
+| 0 | 0 | 4 |
+---+---+---------------------------------+
*/
@@ -156,10 +156,10 @@ TABLE 5
+-------------------------+---+---+
| Rounding Precision | e | f |
+-------------------------+---+---+
-| IEEE Single precision | 0 ü 0 |
-| IEEE Double precision | 0 ü 1 |
-| IEEE Extended precision | 1 ü 0 |
-| undefined (trap) | 1 ü 1 |
+| IEEE Single precision | 0 | 0 |
+| IEEE Double precision | 0 | 1 |
+| IEEE Extended precision | 1 | 0 |
+| undefined (trap) | 1 | 1 |
+-------------------------+---+---+
*/
@@ -168,10 +168,10 @@ TABLE 5
+---------------------------------+---+---+
| Rounding Mode | g | h |
+---------------------------------+---+---+
-| Round to nearest (default) | 0 ü 0 |
-| Round toward plus infinity | 0 ü 1 |
-| Round toward negative infinity | 1 ü 0 |
-| Round toward zero | 1 ü 1 |
+| Round to nearest (default) | 0 | 0 |
+| Round toward plus infinity | 0 | 1 |
+| Round toward negative infinity | 1 | 0 |
+| Round toward zero | 1 | 1 |
+---------------------------------+---+---+
*/
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 05a38498cbe0..dcbba07cf98a 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -2,7 +2,7 @@
* linux/arch/arm/plat-omap/dma.c
*
* Copyright (C) 2003 Nokia Corporation
- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
* DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
* Graphics DMA and LCD DMA graphics tranformations
* by Imre Deak <imre.deak@nokia.com>
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 337455dfe64d..6097753394ad 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -4,7 +4,7 @@
* Support functions for OMAP GPIO
*
* Copyright (C) 2003-2005 Nokia Corporation
- * Written by Juha Yrjölä <juha.yrjola@nokia.com>
+ * Written by Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index d12346aaa88b..bbecbd8469b5 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -189,7 +189,7 @@ config CMDLINE
endmenu
-menu "Power managment options"
+menu "Power management options"
menu "CPU Frequency scaling"
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 4c5ca9d5e40f..ad28dc76fc97 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -613,85 +613,86 @@ config I_ENTRY_L1
bool "Locate interrupt entry code in L1 Memory"
default y
help
- If enabled interrupt entry code (STORE/RESTORE CONTEXT) is linked
- into L1 instruction memory.(less latency)
+ If enabled, interrupt entry code (STORE/RESTORE CONTEXT) is linked
+ into L1 instruction memory. (less latency)
config EXCPT_IRQ_SYSC_L1
- bool "Locate entire ASM lowlevel excepetion / interrupt - Syscall and CPLB handler code in L1 Memory"
+ bool "Locate entire ASM lowlevel exception / interrupt - Syscall and CPLB handler code in L1 Memory"
default y
help
- If enabled entire ASM lowlevel exception and interrupt entry code (STORE/RESTORE CONTEXT) is linked
- into L1 instruction memory.(less latency)
+ If enabled, the entire ASM lowlevel exception and interrupt entry code
+ (STORE/RESTORE CONTEXT) is linked into L1 instruction memory.
+ (less latency)
config DO_IRQ_L1
bool "Locate frequently called do_irq dispatcher function in L1 Memory"
default y
help
- If enabled frequently called do_irq dispatcher function is linked
- into L1 instruction memory.(less latency)
+ If enabled, the frequently called do_irq dispatcher function is linked
+ into L1 instruction memory. (less latency)
config CORE_TIMER_IRQ_L1
bool "Locate frequently called timer_interrupt() function in L1 Memory"
default y
help
- If enabled frequently called timer_interrupt() function is linked
- into L1 instruction memory.(less latency)
+ If enabled, the frequently called timer_interrupt() function is linked
+ into L1 instruction memory. (less latency)
config IDLE_L1
bool "Locate frequently idle function in L1 Memory"
default y
help
- If enabled frequently called idle function is linked
- into L1 instruction memory.(less latency)
+ If enabled, the frequently called idle function is linked
+ into L1 instruction memory. (less latency)
config SCHEDULE_L1
bool "Locate kernel schedule function in L1 Memory"
default y
help
- If enabled frequently called kernel schedule is linked
- into L1 instruction memory.(less latency)
+ If enabled, the frequently called kernel schedule is linked
+ into L1 instruction memory. (less latency)
config ARITHMETIC_OPS_L1
bool "Locate kernel owned arithmetic functions in L1 Memory"
default y
help
- If enabled arithmetic functions are linked
- into L1 instruction memory.(less latency)
+ If enabled, arithmetic functions are linked
+ into L1 instruction memory. (less latency)
config ACCESS_OK_L1
bool "Locate access_ok function in L1 Memory"
default y
help
- If enabled access_ok function is linked
- into L1 instruction memory.(less latency)
+ If enabled, the access_ok function is linked
+ into L1 instruction memory. (less latency)
config MEMSET_L1
bool "Locate memset function in L1 Memory"
default y
help
- If enabled memset function is linked
- into L1 instruction memory.(less latency)
+ If enabled, the memset function is linked
+ into L1 instruction memory. (less latency)
config MEMCPY_L1
bool "Locate memcpy function in L1 Memory"
default y
help
- If enabled memcpy function is linked
- into L1 instruction memory.(less latency)
+ If enabled, the memcpy function is linked
+ into L1 instruction memory. (less latency)
config SYS_BFIN_SPINLOCK_L1
bool "Locate sys_bfin_spinlock function in L1 Memory"
default y
help
- If enabled sys_bfin_spinlock function is linked
- into L1 instruction memory.(less latency)
+ If enabled, sys_bfin_spinlock function is linked
+ into L1 instruction memory. (less latency)
config IP_CHECKSUM_L1
bool "Locate IP Checksum function in L1 Memory"
default n
help
- If enabled IP Checksum function is linked
- into L1 instruction memory.(less latency)
+ If enabled, the IP Checksum function is linked
+ into L1 instruction memory. (less latency)
config CACHELINE_ALIGNED_L1
bool "Locate cacheline_aligned data to L1 Data Memory"
@@ -699,24 +700,24 @@ config CACHELINE_ALIGNED_L1
default n if BF54x
depends on !BF531
help
- If enabled cacheline_anligned data is linked
- into L1 data memory.(less latency)
+ If enabled, cacheline_anligned data is linked
+ into L1 data memory. (less latency)
config SYSCALL_TAB_L1
bool "Locate Syscall Table L1 Data Memory"
default n
depends on !BF531
help
- If enabled the Syscall LUT is linked
- into L1 data memory.(less latency)
+ If enabled, the Syscall LUT is linked
+ into L1 data memory. (less latency)
config CPLB_SWITCH_TAB_L1
bool "Locate CPLB Switch Tables L1 Data Memory"
default n
depends on !BF531
help
- If enabled the CPLB Switch Tables are linked
- into L1 data memory.(less latency)
+ If enabled, the CPLB Switch Tables are linked
+ into L1 data memory. (less latency)
endmenu
@@ -1029,13 +1030,13 @@ config DEBUG_HWERR
from.
config DEBUG_ICACHE_CHECK
- bool "Check Instruction cache coherancy"
+ bool "Check Instruction cache coherency"
depends on DEBUG_KERNEL
depends on DEBUG_HWERR
help
- Say Y here if you are getting wierd unexplained errors. This will
- ensure that icache is what SDRAM says it should be, by doing a
- byte wise comparision between SDRAM and instruction cache. This
+ Say Y here if you are getting weird unexplained errors. This will
+ ensure that icache is what SDRAM says it should be by doing a
+ byte wise comparison between SDRAM and instruction cache. This
also relocates the irq_panic() function to L1 memory, (which is
un-cached).
diff --git a/arch/cris/arch-v10/Kconfig b/arch/cris/arch-v10/Kconfig
index c7ea9efd0104..f1ce6f64401d 100644
--- a/arch/cris/arch-v10/Kconfig
+++ b/arch/cris/arch-v10/Kconfig
@@ -182,7 +182,7 @@ config ETRAX_LED7G
set this to same as CONFIG_ETRAX_LED1G (normally 2).
config ETRAX_LED8Y
- int "Eigth yellow LED bit"
+ int "Eighth yellow LED bit"
depends on ETRAX_CSP0_LEDS
default "2"
help
diff --git a/arch/cris/arch-v10/boot/compressed/misc.c b/arch/cris/arch-v10/boot/compressed/misc.c
index ffb8d21b2f83..e205d2e7e089 100644
--- a/arch/cris/arch-v10/boot/compressed/misc.c
+++ b/arch/cris/arch-v10/boot/compressed/misc.c
@@ -8,7 +8,7 @@
*
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
* puts by Nick Holloway 1993, better puts by Martin Mares 1995
- * adoptation for Linux/CRIS Axis Communications AB, 1999
+ * adaptation for Linux/CRIS Axis Communications AB, 1999
*
*/
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
index 1de0026bb94e..c263b8232dbc 100644
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ b/arch/cris/arch-v10/drivers/pcf8563.c
@@ -4,7 +4,7 @@
* From Phillips' datasheet:
*
* The PCF8563 is a CMOS real-time clock/calendar optimized for low power
- * consumption. A programmable clock output, interupt output and voltage
+ * consumption. A programmable clock output, interrupt output and voltage
* low detector are also provided. All address and data are transferred
* serially via two-line bidirectional I2C-bus. Maximum bus speed is
* 400 kbits/s. The built-in word address register is incremented
diff --git a/arch/cris/arch-v10/kernel/debugport.c b/arch/cris/arch-v10/kernel/debugport.c
index 2b536ca6f444..93679a48c791 100644
--- a/arch/cris/arch-v10/kernel/debugport.c
+++ b/arch/cris/arch-v10/kernel/debugport.c
@@ -83,7 +83,7 @@
*
* Revision 1.4 2002/11/19 14:35:24 starvik
* Changes from linux 2.4
- * Changed struct initializer syntax to the currently prefered notation
+ * Changed struct initializer syntax to the currently preferred notation
*
* Revision 1.3 2002/11/06 09:47:03 starvik
* Modified for new interrupt macros
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index ae45d4522e65..c5844cb70f09 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -97,7 +97,7 @@
*
* Revision 1.36 2001/11/22 13:36:36 bjornw
* * In ret_from_intr, check regs->dccr for usermode reentrance instead of
- * DCCR explicitely (because the latter might not reflect current reality)
+ * DCCR explicitly (because the latter might not reflect current reality)
* * In mmu_bus_fault, set $r9 _after_ calling the C-code instead of before
* since $r9 is call-clobbered and is potentially needed afterwards
*
diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c
index 8cbdf594b369..d3ea052e5ee1 100644
--- a/arch/cris/arch-v10/kernel/fasttimer.c
+++ b/arch/cris/arch-v10/kernel/fasttimer.c
@@ -84,7 +84,7 @@
* with time based on jiffies and *R_TIMER0_DATA, uses a table
* for fast conversion of timer value to microseconds.
* (Much faster the standard do_gettimeofday() and we don't really
- * wan't to use the true time - we wan't the "uptime" so timers don't screw up
+ * want to use the true time - we want the "uptime" so timers don't screw up
* when we change the time.
* TODO: Add efficient support for continuous timers as well.
*
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index 96094cbf1255..845c95f6e871 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -169,7 +169,7 @@ init_IRQ(void)
for (i = 0; i < 256; i++)
etrax_irv->v[i] = weird_irq;
- /* Initialize IRQ handler descriptiors. */
+ /* Initialize IRQ handler descriptors. */
for(i = 2; i < NR_IRQS; i++) {
irq_desc[i].chip = &crisv10_irq_type;
set_int_vector(i, interrupt[i]);
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index 07628a13c6c4..77f4b1423725 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -959,7 +959,7 @@ stub_is_stopped(int sigval)
/* Send register contents. We probably only need to send the
* PC, frame pointer and stack pointer here. Other registers will be
- * explicitely asked for. But for now, send all.
+ * explicitly asked for. But for now, send all.
*/
for (regno = R0; regno <= USP; regno++) {
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index b6831ceb6a62..1a3760c94f85 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -64,7 +64,7 @@ void hard_reset_now (void)
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
cause_of_death = 0xbedead;
#else
- /* Since we dont plan to keep on reseting the watchdog,
+ /* Since we dont plan to keep on resetting the watchdog,
the key can be arbitrary hence three */
*R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, 3) |
IO_STATE(R_WATCHDOG, enable, start);
diff --git a/arch/cris/arch-v10/kernel/shadows.c b/arch/cris/arch-v10/kernel/shadows.c
index 38fd44dfbc5b..326178aef6ee 100644
--- a/arch/cris/arch-v10/kernel/shadows.c
+++ b/arch/cris/arch-v10/kernel/shadows.c
@@ -20,7 +20,7 @@ unsigned long r_timer_ctrl_shadow;
* These are only usable if there actually IS a latch connected
* to the corresponding external chip-select pin.
*
- * A common usage is that CSP0 controls LED's and CSP4 video chips.
+ * A common usage is that CSP0 controls LEDs and CSP4 video chips.
*/
unsigned long port_cse1_shadow;
diff --git a/arch/cris/arch-v10/lib/dram_init.S b/arch/cris/arch-v10/lib/dram_init.S
index 9cf83932cd5d..6a6bdfd6984d 100644
--- a/arch/cris/arch-v10/lib/dram_init.S
+++ b/arch/cris/arch-v10/lib/dram_init.S
@@ -40,7 +40,7 @@
* Copy warning from head.S about r8 and r9
*
* Revision 1.7 2001/04/18 12:05:39 bjornw
- * Fixed comments, and explicitely include config.h to be sure its there
+ * Fixed comments, and explicitly include config.h to be sure its there
*
* Revision 1.6 2001/04/10 06:20:16 starvik
* Delay should be 200us, not 200ns
@@ -66,7 +66,7 @@
*/
/* Just to be certain the config file is included, we include it here
- * explicitely instead of depending on it being included in the file that
+ * explicitly instead of depending on it being included in the file that
* uses this code.
*/
diff --git a/arch/cris/arch-v10/lib/string.c b/arch/cris/arch-v10/lib/string.c
index 8ffde4901b57..15d6662b03b1 100644
--- a/arch/cris/arch-v10/lib/string.c
+++ b/arch/cris/arch-v10/lib/string.c
@@ -41,7 +41,7 @@ void *memcpy(void *pdst,
Make sure the compiler is able to make something useful of this.
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register void *return_dst __asm__ ("r10") = pdst;
diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c
index 43778d53c254..a12c708afc9a 100644
--- a/arch/cris/arch-v10/lib/usercopy.c
+++ b/arch/cris/arch-v10/lib/usercopy.c
@@ -38,7 +38,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
FIXME: Comment for old gcc version. Check.
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register char *dst __asm__ ("r13") = pdst;
@@ -200,7 +200,7 @@ __copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn)
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
FIXME: Comment for old gcc version. Check.
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register char *dst __asm__ ("r13") = pdst;
@@ -380,7 +380,7 @@ __do_clear_user (void __user *pto, unsigned long pn)
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
FIXME: Comment for old gcc version. Check.
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register char *dst __asm__ ("r13") = pto;
diff --git a/arch/cris/arch-v32/boot/compressed/misc.c b/arch/cris/arch-v32/boot/compressed/misc.c
index 11902697196d..0169ba1ca9c9 100644
--- a/arch/cris/arch-v32/boot/compressed/misc.c
+++ b/arch/cris/arch-v32/boot/compressed/misc.c
@@ -8,7 +8,7 @@
*
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
* puts by Nick Holloway 1993, better puts by Martin Mares 1995
- * adoptation for Linux/CRIS Axis Communications AB, 1999
+ * adaptation for Linux/CRIS Axis Communications AB, 1999
*
*/
@@ -151,7 +151,7 @@ serout(const char *s, reg_scope_instances regi_ser)
do {
rs = REG_RD(ser, regi_ser, rs_stat_din);
}
- while (!rs.tr_rdy);/* Wait for tranceiver. */
+ while (!rs.tr_rdy);/* Wait for transceiver. */
REG_WR(ser, regi_ser, rw_dout, dout);
}
@@ -264,7 +264,7 @@ serial_setup(reg_scope_instances regi_ser)
tr_ctrl.stop_bits = 1; /* 2 stop bits. */
/*
- * The baudrate setup is a bit fishy, but in the end the tranceiver is
+ * The baudrate setup is a bit fishy, but in the end the transceiver is
* set to 4800 and the receiver to 115200. The magic value is
* 29.493 MHz.
*/
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 5180d45412fc..3ec12ea44e8e 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -205,7 +205,7 @@ static struct mtd_info *probe_cs(struct map_info *map_cs)
/*
* Probe each chip select individually for flash chips. If there are chips on
* both cse0 and cse1, the mtd_info structs will be concatenated to one struct
- * so that MTD partitions can cross chip boundries.
+ * so that MTD partitions can cross chip boundaries.
*
* The only known restriction to how you can mount your chips is that each
* chip select must hold similar flash chips. But you need external hardware
diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c
index e12f6cc6f4a2..f1edd2e359b2 100644
--- a/arch/cris/arch-v32/drivers/i2c.c
+++ b/arch/cris/arch-v32/drivers/i2c.c
@@ -275,7 +275,7 @@ i2c_getack(void)
ack = 0;
i2c_delay(CLOCK_HIGH_TIME/2);
if(!ack){
- if(!i2c_getbit()) /* receiver pulld SDA low */
+ if(!i2c_getbit()) /* receiver pulled SDA low */
ack = 1;
i2c_delay(CLOCK_HIGH_TIME/2);
}
diff --git a/arch/cris/arch-v32/drivers/nandflash.c b/arch/cris/arch-v32/drivers/nandflash.c
index 93ddea4d9564..5ce015c6bb0d 100644
--- a/arch/cris/arch-v32/drivers/nandflash.c
+++ b/arch/cris/arch-v32/drivers/nandflash.c
@@ -138,7 +138,7 @@ struct mtd_info* __init crisv32_nand_flash_probe (void)
/* Enable the following for a flash based bad block table */
this->options = NAND_USE_FLASH_BBT;
- /* Scan to find existance of the device */
+ /* Scan to find existence of the device */
if (nand_scan (crisv32_mtd, 1)) {
err = -ENXIO;
goto out_ior;
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c
index da479a14f836..6dbd700d3d66 100644
--- a/arch/cris/arch-v32/drivers/pcf8563.c
+++ b/arch/cris/arch-v32/drivers/pcf8563.c
@@ -4,7 +4,7 @@
* From Phillips' datasheet:
*
* The PCF8563 is a CMOS real-time clock/calendar optimized for low power
- * consumption. A programmable clock output, interupt output and voltage
+ * consumption. A programmable clock output, interrupt output and voltage
* low detector are also provided. All address and data are transferred
* serially via two-line bidirectional I2C-bus. Maximum bus speed is
* 400 kbits/s. The built-in word address register is incremented
diff --git a/arch/cris/arch-v32/kernel/fasttimer.c b/arch/cris/arch-v32/kernel/fasttimer.c
index 79e1e4c2ca1d..b40551f9f40d 100644
--- a/arch/cris/arch-v32/kernel/fasttimer.c
+++ b/arch/cris/arch-v32/kernel/fasttimer.c
@@ -97,7 +97,7 @@
* with time based on jiffies and *R_TIMER0_DATA, uses a table
* for fast conversion of timer value to microseconds.
* (Much faster the standard do_gettimeofday() and we don't really
- * wan't to use the true time - we wan't the "uptime" so timers don't screw up
+ * want to use the true time - we want the "uptime" so timers don't screw up
* when we change the time.
* TODO: Add efficient support for continuous timers as well.
*
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index cc361bf578ae..a9acaa270243 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -140,7 +140,7 @@ block_irq(int irq, int cpu)
spin_lock_irqsave(&irq_lock, flags);
intr_mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask);
- /* Remember; 1 let thru, 0 block. */
+ /* Remember; 1 let through, 0 block. */
intr_mask &= ~(1 << (irq - FIRST_IRQ));
REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, intr_mask);
@@ -156,7 +156,7 @@ unblock_irq(int irq, int cpu)
spin_lock_irqsave(&irq_lock, flags);
intr_mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask);
- /* Remember; 1 let thru, 0 block. */
+ /* Remember; 1 let through, 0 block. */
intr_mask |= (1 << (irq - FIRST_IRQ));
REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, intr_mask);
@@ -308,7 +308,7 @@ crisv32_do_multiple(struct pt_regs* regs)
*/
irq_enter();
- /* Get which IRQs that happend. */
+ /* Get which IRQs that happened. */
masked = REG_RD_INT(intr_vect, irq_regs[cpu], r_masked_vect);
/* Calculate new IRQ mask with these IRQs disabled. */
@@ -366,7 +366,7 @@ init_IRQ(void)
for (i = 0; i < 256; i++)
etrax_irv->v[i] = weird_irq;
- /* Point all IRQ's to bad handlers. */
+ /* Point all IRQs to bad handlers. */
for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
irq_desc[j].chip = &crisv32_irq_type;
set_exception_vector(i, interrupt[j]);
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 6326351af252..b72a15580dc7 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -162,7 +162,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
/* Put the switch stack right below the pt_regs. */
swstack = ((struct switch_stack *) childregs) - 1;
- /* Paramater to ret_from_sys_call. 0 is don't restart the syscall. */
+ /* Parameter to ret_from_sys_call. 0 is don't restart the syscall. */
swstack->r9 = 0;
/*
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 7cd6ac803409..024cc6901974 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -347,7 +347,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
/* Grab and setup a signal frame.
*
* Basically a lot of state-info is stacked, and arranged for the
- * user-mode program to return to the kernel using either a trampiline
+ * user-mode program to return to the kernel using either a trampoline
* which performs the syscall sigreturn(), or a provided user-mode
* trampoline.
*/
@@ -641,7 +641,7 @@ ugdb_trap_user(struct thread_info *ti, int sig)
user_regs(ti)->spc = 0;
}
/* FIXME: Filter out false h/w breakpoint hits (i.e. EDA
- not withing any configured h/w breakpoint range). Synchronize with
+ not within any configured h/w breakpoint range). Synchronize with
what already exists for kernel debugging. */
if (((user_regs(ti)->exs & 0xff00) >> 8) == BREAK_8_INTR_VECT) {
/* Break 8: subtract 2 from ERP unless in a delay slot. */
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 697494bc2de1..171c96e0a5d3 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -142,7 +142,7 @@ smp_boot_one_cpu(int cpuid)
return -1;
}
-/* Secondary CPUs starts uing C here. Here we need to setup CPU
+/* Secondary CPUs starts using C here. Here we need to setup CPU
* specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index be0a01657d4f..2f7e8e200f2c 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -99,7 +99,7 @@ unsigned long do_slow_gettimeoffset(void)
/* From timer MDS describing the hardware watchdog:
* 4.3.1 Watchdog Operation
* The watchdog timer is an 8-bit timer with a configurable start value.
- * Once started the whatchdog counts downwards with a frequency of 763 Hz
+ * Once started the watchdog counts downwards with a frequency of 763 Hz
* (100/131072 MHz). When the watchdog counts down to 1, it generates an
* NMI (Non Maskable Interrupt), and when it counts down to 0, it resets the
* chip.
diff --git a/arch/cris/arch-v32/kernel/traps.c b/arch/cris/arch-v32/kernel/traps.c
index 2462b1ef1fbb..17fd3dbd1c80 100644
--- a/arch/cris/arch-v32/kernel/traps.c
+++ b/arch/cris/arch-v32/kernel/traps.c
@@ -105,7 +105,7 @@ bad_value:
/*
* This gets called from entry.S when the watchdog has bitten. Show something
- * similiar to an Oops dump, and if the kernel if configured to be a nice doggy;
+ * similar to an Oops dump, and if the kernel is configured to be a nice doggy;
* halt instead of reboot.
*/
void
diff --git a/arch/cris/arch-v32/lib/dram_init.S b/arch/cris/arch-v32/lib/dram_init.S
index 158b3dbb4d9d..218fbe259ee5 100644
--- a/arch/cris/arch-v32/lib/dram_init.S
+++ b/arch/cris/arch-v32/lib/dram_init.S
@@ -12,7 +12,7 @@
*/
/* Just to be certain the config file is included, we include it here
- * explicitely instead of depending on it being included in the file that
+ * explicitly instead of depending on it being included in the file that
* uses this code.
*/
diff --git a/arch/cris/arch-v32/lib/string.c b/arch/cris/arch-v32/lib/string.c
index 98e282ac824a..6740b2cebae5 100644
--- a/arch/cris/arch-v32/lib/string.c
+++ b/arch/cris/arch-v32/lib/string.c
@@ -41,7 +41,7 @@ void *memcpy(void *pdst,
Make sure the compiler is able to make something useful of this.
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register void *return_dst __asm__ ("r10") = pdst;
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
index f0b08460c1be..04d0cf35a276 100644
--- a/arch/cris/arch-v32/lib/usercopy.c
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -34,7 +34,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
FIXME: Comment for old gcc version. Check.
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register char *dst __asm__ ("r13") = pdst;
@@ -168,7 +168,7 @@ __copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn)
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
FIXME: Comment for old gcc version. Check.
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register char *dst __asm__ ("r13") = pdst;
@@ -332,7 +332,7 @@ __do_clear_user (void __user *pto, unsigned long pn)
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
FIXME: Comment for old gcc version. Check.
- If gcc was allright, it really would need no temporaries, and no
+ If gcc was alright, it really would need no temporaries, and no
stack space to save stuff on. */
register char *dst __asm__ ("r13") = pto;
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
index c2d12e9c40d7..a076ef6e9389 100644
--- a/arch/cris/arch-v32/mm/tlb.c
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -30,8 +30,8 @@ do { \
* The TLB can host up to 256 different mm contexts at the same time. The running
* context is found in the PID register. Each TLB entry contains a page_id that
* has to match the PID register to give a hit. page_id_map keeps track of which
- * mm's is assigned to which page_id's, making sure it's known when to
- * invalidate TLB entries.
+ * mm is assigned to which page_id, making sure it's known when to invalidate TLB
+ * entries.
*
* The last page_id is never running, it is used as an invalid page_id so that
* it's possible to make TLB entries that will nerver match.
@@ -188,7 +188,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
spin_unlock(&mmu_context_lock);
/*
- * Remember the pgd for the fault handlers. Keep a seperate copy of it
+ * Remember the pgd for the fault handlers. Keep a separate copy of it
* because current and active_mm might be invalid at points where
* there's still a need to derefer the pgd.
*/
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 903ea62c6e21..5c27ff86121b 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -7,7 +7,7 @@
* Authors: Bjorn Wesen (bjornw@axis.com)
*
* This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
+ * asking for different IRQs should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
@@ -15,7 +15,7 @@
*/
/*
- * IRQ's are in fact implemented a bit like signal handlers for the kernel.
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
@@ -83,9 +83,9 @@ skip:
/* called by the assembler IRQ entry functions defined in irq.h
- * to dispatch the interrupts to registred handlers
+ * to dispatch the interrupts to registered handlers
* interrupts are disabled upon entry - depending on if the
- * interrupt was registred with IRQF_DISABLED or not, interrupts
+ * interrupt was registered with IRQF_DISABLED or not, interrupts
* are re-enabled or not.
*/
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 8aab81430695..3034f3ff950c 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -13,7 +13,7 @@
* Fixed warning.
*
* Revision 1.18 2005/01/12 08:10:14 starvik
- * Readded the change of frametype when handling kernel page fault fixup
+ * Re-added the change of frametype when handling kernel page fault fixup
* for v10. This is necessary to avoid that the CPU remakes the faulting
* access.
*
@@ -49,7 +49,7 @@
*
* Revision 1.8 2003/07/04 13:02:48 tobiasa
* Moved code snippet from arch/cris/mm/fault.c that searches for fixup code
- * to seperate function in arch-specific files.
+ * to separate function in arch-specific files.
*
* Revision 1.7 2003/01/22 06:48:38 starvik
* Fixed warnings issued by GCC 3.2.1
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index b7842ff213a6..0c833d176226 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -8,7 +8,7 @@
*
* $Log: init.c,v $
* Revision 1.11 2004/05/28 09:28:56 starvik
- * Calculation of loops_per_usec moved because initalization order has changed
+ * Calculation of loops_per_usec moved because initialization order has changed
* in Linux 2.6.
*
* Revision 1.10 2004/05/14 07:58:05 starvik
diff --git a/arch/cris/mm/tlb.c b/arch/cris/mm/tlb.c
index c4a98e2e529e..b7f8de576777 100644
--- a/arch/cris/mm/tlb.c
+++ b/arch/cris/mm/tlb.c
@@ -16,7 +16,7 @@
/* The TLB can host up to 64 different mm contexts at the same time.
* The running context is R_MMU_CONTEXT, and each TLB entry contains a
* page_id that has to match to give a hit. In page_id_map, we keep track
- * of which mm's we have assigned which page_id's, so that we know when
+ * of which mm we have assigned to which page_id, so that we know when
* to invalidate TLB entries.
*
* The last page_id is never running - it is used as an invalid page_id
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 7ddb69089ed4..73abae767fdc 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -134,7 +134,7 @@ static struct irq_chip frv_cpu_pic = {
};
/*
- * handles all normal device IRQ's
+ * handles all normal device IRQs
* - registers are referred to by the __frame variable (GR28)
* - IRQ distribution is complicated in this arch because of the many PICs, the
* way they work and the way they cascade
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
index 8e182ced1a0f..7ee3a147b471 100644
--- a/arch/frv/kernel/semaphore.c
+++ b/arch/frv/kernel/semaphore.c
@@ -139,7 +139,7 @@ void __up(struct semaphore *sem)
waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
/* We must be careful not to touch 'waiter' after we set ->task = NULL.
- * It is an allocated on the waiter's stack and may become invalid at
+ * It is allocated on the waiter's stack and may become invalid at
* any time after that point (due to a wakeup from another source).
*/
list_del_init(&waiter->list);
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index e83e0bccfab9..925fb0199a0f 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -66,7 +66,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
/*
* Here we are in the timer irq handler. We just have irqs locally
* disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
+ * CPU. We need to avoid to SMP race with it. NOTE: we don't need
* the irq version of write_lock because as just said we have irq
* locally disabled. -arca
*/
@@ -126,7 +126,7 @@ void time_init(void)
/* FIX by dqg : Set to zero for platforms that don't have tod */
/* without this time is undefined and can overflow time_t, causing */
- /* very stange errors */
+ /* very strange errors */
year = 1980;
mon = day = 1;
hour = min = sec = 0;
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
index 996d97e953b0..ee671c3f2c74 100644
--- a/arch/h8300/Kconfig.debug
+++ b/arch/h8300/Kconfig.debug
@@ -42,16 +42,16 @@ config SH_STANDARD_BIOS
Require eCos/RedBoot
config DEFAULT_CMDLINE
- bool "Use buildin commandline"
+ bool "Use builtin commandline"
default n
help
- buildin kernel commandline enabled.
+ builtin kernel commandline enabled.
config KERNEL_COMMAND
string "Buildin commmand string"
depends on DEFAULT_CMDLINE
help
- buildin kernel commandline strings.
+ builtin kernel commandline strings.
config BLKDEV_RESERVE
bool "BLKDEV Reserved Memory"
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 43d21e93f41f..8dec4dd57b4e 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -68,7 +68,7 @@ static void h8300_shutdown_irq(unsigned int irq)
}
/*
- * h8300 interrupt controler implementation
+ * h8300 interrupt controller implementation
*/
struct irq_chip h8300irq_chip = {
.name = "H8300-INTC",
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 330638220a2e..e37c835e67cf 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -53,7 +53,7 @@ void time_init(void)
/* FIX by dqg : Set to zero for platforms that don't have tod */
/* without this time is undefined and can overflow time_t, causing */
- /* very stange errors */
+ /* very strange errors */
year = 1980;
mon = day = 1;
hour = min = sec = 0;
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index f97183011c2c..f8f7d7ea97f1 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -5,7 +5,7 @@
* Cloned from Linux/m68k.
*
* No original Copyright holder listed,
- * Probabily original (C) Roman Zippel (assigned DJD, 1999)
+ * Probable original (C) Roman Zippel (assigned DJD, 1999)
*
* Copyright 1999-2000 D. Jeff Dionne, <jeff@rt-control.com>
*
diff --git a/arch/h8300/platform/h8s/ints.c b/arch/h8300/platform/h8s/ints.c
index a71d6e2a3919..551fd5f30d82 100644
--- a/arch/h8300/platform/h8s/ints.c
+++ b/arch/h8300/platform/h8s/ints.c
@@ -179,7 +179,7 @@ int request_irq(unsigned int irq,
if (use_kmalloc)
irq_handle = kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
else {
- /* use bootmem allocater */
+ /* use bootmem allocator */
irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t));
irq_handle = (irq_handler_t *)((unsigned long)irq_handle | 0x80000000);
}
diff --git a/arch/h8300/platform/h8s/ints_h8s.c b/arch/h8300/platform/h8s/ints_h8s.c
index 93395d2a8a07..faa8a459d952 100644
--- a/arch/h8300/platform/h8s/ints_h8s.c
+++ b/arch/h8300/platform/h8s/ints_h8s.c
@@ -63,7 +63,7 @@ static const struct irq_pins irq_assign_table1[16]={
{H8300_GPIO_P2,H8300_GPIO_B6},{H8300_GPIO_P2,H8300_GPIO_B7},
};
-/* IRQ to GPIO pinno transrate */
+/* IRQ to GPIO pin translation */
#define IRQ_GPIO_MAP(irqbit,irq,port,bit) \
do { \
if (*(volatile unsigned short *)ITSR & irqbit) { \
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d0a4ea1ba14d..f6e44fc5283c 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -392,7 +392,7 @@ config X86_MCE_NONFATAL
will look at the machine check registers to see if anything happened.
Non-fatal problems automatically get corrected (but still logged).
Disable this if you don't want to see these messages.
- Seeing the messages this option prints out may be indicative of dying hardware,
+ Seeing the messages this option prints out may be indicative of dying
or out-of-spec (ie, overclocked) hardware.
This option only does something on certain CPUs.
(AMD Athlon/Duron and Intel Pentium 4)
@@ -631,7 +631,7 @@ config NUMA
default n if X86_PC
default y if (X86_NUMAQ || X86_SUMMIT)
help
- NUMA support for i386. This is currently high experimental
+ NUMA support for i386. This is currently highly experimental
and should be only used for kernel development. It might also
cause boot failures.
@@ -1080,7 +1080,7 @@ config APM_REAL_MODE_POWER_OFF
endif # APM
-source "arch/x86/kernel/cpu/cpufreq/Kconfig"
+source "arch/x86/kernel/cpu/cpufreq/Kconfig_32"
source "drivers/cpuidle/Kconfig"
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index f036d2dee3de..b88e47ca3032 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -102,7 +102,7 @@ core-$(CONFIG_XEN) += arch/x86/xen/
# default subarch .h files
mflags-y += -Iinclude/asm-x86/mach-default
-head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task_32.o
+head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o
libs-y += arch/x86/lib/
core-y += arch/x86/kernel/ \
@@ -131,9 +131,9 @@ all: bzImage
zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
zImage bzImage: vmlinux
- $(Q)mkdir -p $(objtree)/arch/i386/boot
- $(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
+ $(Q)mkdir -p $(objtree)/arch/i386/boot
+ $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
compressed: zImage
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index c89108e9770d..bef47725d4ad 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -452,9 +452,9 @@ config IA64_PALINFO
config IA64_MC_ERR_INJECT
tristate "MC error injection support"
help
- Selets whether support for MC error injection. By enabling the
- support, kernel provide sysfs interface for user application to
- call MC error injection PAL procedure to inject various errors.
+ Adds support for MC error injection. If enabled, the kernel
+ will provide a sysfs interface for user applications to
+ call MC error injection PAL procedures to inject various errors.
This is a useful tool for MCA testing.
If you're unsure, do not select this option.
@@ -491,7 +491,7 @@ config KEXEC
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
- The name comes from the similiarity to the exec system call.
+ The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index f8d8650383e0..d0c5b0b7da2f 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -71,7 +71,7 @@ skip:
}
/*
- * do_IRQ handles all normal device IRQ's (the special
+ * do_IRQ handles all normal device IRQs (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 916faf6070af..a753d79c4e89 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -358,7 +358,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Reenable any watchpoints before delivering the
+ /* Re-enable any watchpoints before delivering the
* signal to user space. The processor register will
* have been cleared if the watchpoint triggered
* inside the kernel.
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 360129174b2b..c837bc13b015 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -202,7 +202,7 @@ void smp_flush_cache_all_interrupt(void)
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* TLB flush request Routins */
+/* TLB flush request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*==========================================================================*
@@ -378,7 +378,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
* Name: flush_tlb_others
*
* Description: This routine requests other CPU to execute flush TLB.
- * 1.Setup parmeters.
+ * 1.Setup parameters.
* 2.Send 'INVALIDATE_TLB_IPI' to other CPU.
* Request other CPU to execute 'smp_invalidate_interrupt()'.
* 3.Wait for other CPUs operation finished.
@@ -502,7 +502,7 @@ void smp_invalidate_interrupt(void)
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Stop CPU request Routins */
+/* Stop CPU request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*==========================================================================*
@@ -566,7 +566,7 @@ static void stop_this_cpu(void *dummy)
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Call function Routins */
+/* Call function Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*==========================================================================*
@@ -690,7 +690,7 @@ void smp_call_function_interrupt(void)
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Timer Routins */
+/* Timer Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*==========================================================================*
@@ -802,7 +802,7 @@ void smp_local_timer_interrupt(void)
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Send IPI Routins */
+/* Send IPI Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*==========================================================================*
@@ -814,7 +814,7 @@ void smp_local_timer_interrupt(void)
*
* Arguments: ipi_num - Number of IPI
* try - 0 : Send IPI certainly.
- * !0 : The following IPI is not sended when Target CPU
+ * !0 : The following IPI is not sent when Target CPU
* has not received the before IPI.
*
* Returns: void (cannot fail)
@@ -844,7 +844,7 @@ void send_IPI_allbutself(int ipi_num, int try)
* Arguments: cpu_mask - Bitmap of target CPUs logical ID
* ipi_num - Number of IPI
* try - 0 : Send IPI certainly.
- * !0 : The following IPI is not sended when Target CPU
+ * !0 : The following IPI is not sent when Target CPU
* has not received the before IPI.
*
* Returns: void (cannot fail)
@@ -885,7 +885,7 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
* Arguments: cpu_mask - Bitmap of target CPUs physical ID
* ipi_num - Number of IPI
* try - 0 : Send IPI certainly.
- * !0 : The following IPI is not sended when Target CPU
+ * !0 : The following IPI is not sent when Target CPU
* has not received the before IPI.
*
* Returns: IPICRi regster value.
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 9dae410014d8..0e383da158e9 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -133,7 +133,7 @@ static void map_cpu_to_physid(int, int);
static void unmap_cpu_to_physid(int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Boot up APs Routins : BSP */
+/* Boot up APs Routines : BSP */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
void __devinit smp_prepare_boot_cpu(void)
{
@@ -404,7 +404,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Activate a secondary processor Routins */
+/* Activate a secondary processor Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*==========================================================================*
@@ -509,7 +509,7 @@ static void __init smp_online(void)
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Boot up CPUs common Routins */
+/* Boot up CPUs common Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
static void __init show_mp_info(int nr_cpu)
{
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index 0fc2efec18f6..6d7a80fdad48 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -214,7 +214,7 @@ asmlinkage int sys_uname(struct old_utsname __user * name)
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
- /* This should flush more selectivly ... */
+ /* This should flush more selectively ... */
_flush_cache_all();
return 0;
}
diff --git a/arch/m68k/amiga/pcmcia.c b/arch/m68k/amiga/pcmcia.c
index fc57c6e72acf..186662ca1a89 100644
--- a/arch/m68k/amiga/pcmcia.c
+++ b/arch/m68k/amiga/pcmcia.c
@@ -33,7 +33,7 @@ void pcmcia_reset(void)
/* copy a tuple, including tuple header. return nb bytes copied */
-/* be carefull as this may trigger a GAYLE_IRQ_WR interrupt ! */
+/* be careful as this may trigger a GAYLE_IRQ_WR interrupt ! */
int pcmcia_copy_tuple(unsigned char tuple_id, void *tuple, int max_len)
{
diff --git a/arch/m68k/ifpsp060/CHANGES b/arch/m68k/ifpsp060/CHANGES
index c1e712dfc2e7..ba96596910fd 100644
--- a/arch/m68k/ifpsp060/CHANGES
+++ b/arch/m68k/ifpsp060/CHANGES
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/MISC b/arch/m68k/ifpsp060/MISC
index b7e644b94ae2..1a63913daa16 100644
--- a/arch/m68k/ifpsp060/MISC
+++ b/arch/m68k/ifpsp060/MISC
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/README b/arch/m68k/ifpsp060/README
index e3bced429bd3..f6f8f5c59419 100644
--- a/arch/m68k/ifpsp060/README
+++ b/arch/m68k/ifpsp060/README
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/TEST.DOC b/arch/m68k/ifpsp060/TEST.DOC
index 5e5900cb2dc4..1ba3aef1500f 100644
--- a/arch/m68k/ifpsp060/TEST.DOC
+++ b/arch/m68k/ifpsp060/TEST.DOC
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/fplsp.doc b/arch/m68k/ifpsp060/fplsp.doc
index fb637c436762..89730a934c5e 100644
--- a/arch/m68k/ifpsp060/fplsp.doc
+++ b/arch/m68k/ifpsp060/fplsp.doc
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/fpsp.doc b/arch/m68k/ifpsp060/fpsp.doc
index 408315209e62..23d513f72ed9 100644
--- a/arch/m68k/ifpsp060/fpsp.doc
+++ b/arch/m68k/ifpsp060/fpsp.doc
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/fskeleton.S b/arch/m68k/ifpsp060/fskeleton.S
index a45a4ff9d2af..0a1ae4f44130 100644
--- a/arch/m68k/ifpsp060/fskeleton.S
+++ b/arch/m68k/ifpsp060/fskeleton.S
@@ -4,7 +4,7 @@
|M68060 Software Package
|Production Release P1.00 -- October 10, 1994
|
-|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
|
|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
|To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/ilsp.doc b/arch/m68k/ifpsp060/ilsp.doc
index f6fae6d900ae..4e6292f095aa 100644
--- a/arch/m68k/ifpsp060/ilsp.doc
+++ b/arch/m68k/ifpsp060/ilsp.doc
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/iskeleton.S b/arch/m68k/ifpsp060/iskeleton.S
index b2dbdf5ee309..91a9c65fee8a 100644
--- a/arch/m68k/ifpsp060/iskeleton.S
+++ b/arch/m68k/ifpsp060/iskeleton.S
@@ -4,7 +4,7 @@
|M68060 Software Package
|Production Release P1.00 -- October 10, 1994
|
-|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
|
|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
|To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/isp.doc b/arch/m68k/ifpsp060/isp.doc
index 5a90fded3f0b..9dadd727fc50 100644
--- a/arch/m68k/ifpsp060/isp.doc
+++ b/arch/m68k/ifpsp060/isp.doc
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/os.S b/arch/m68k/ifpsp060/os.S
index aa4df87a6c42..7a0d6e428066 100644
--- a/arch/m68k/ifpsp060/os.S
+++ b/arch/m68k/ifpsp060/os.S
@@ -4,7 +4,7 @@
|M68060 Software Package
|Production Release P1.00 -- October 10, 1994
|
-|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
|
|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
|To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/src/fplsp.S b/arch/m68k/ifpsp060/src/fplsp.S
index fdb79b927ef1..3b7ea2dc9f1b 100644
--- a/arch/m68k/ifpsp060/src/fplsp.S
+++ b/arch/m68k/ifpsp060/src/fplsp.S
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
index 3b597a9bbf43..6c1a9a217887 100644
--- a/arch/m68k/ifpsp060/src/fpsp.S
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/src/ftest.S b/arch/m68k/ifpsp060/src/ftest.S
index 2edcbae0fd53..1f947915d81e 100644
--- a/arch/m68k/ifpsp060/src/ftest.S
+++ b/arch/m68k/ifpsp060/src/ftest.S
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S
index afa7422cddb5..970abaf3303e 100644
--- a/arch/m68k/ifpsp060/src/ilsp.S
+++ b/arch/m68k/ifpsp060/src/ilsp.S
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S
index b269091d9df6..6dccda766e22 100644
--- a/arch/m68k/ifpsp060/src/isp.S
+++ b/arch/m68k/ifpsp060/src/isp.S
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/src/itest.S b/arch/m68k/ifpsp060/src/itest.S
index ba4a30cbcbea..beca47e7d514 100644
--- a/arch/m68k/ifpsp060/src/itest.S
+++ b/arch/m68k/ifpsp060/src/itest.S
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/ifpsp060/src/pfpsp.S b/arch/m68k/ifpsp060/src/pfpsp.S
index 0c997c436beb..51b9f7d879dd 100644
--- a/arch/m68k/ifpsp060/src/pfpsp.S
+++ b/arch/m68k/ifpsp060/src/pfpsp.S
@@ -4,7 +4,7 @@ M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
-M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 8547dbc5e8d7..01b468b9392e 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -284,7 +284,7 @@ static struct mac_model mac_data_table[] = {
},
/*
- * Weirdified MacII hardware - all subtley different. Gee thanks
+ * Weirdified MacII hardware - all subtly different. Gee thanks
* Apple. All these boxes seem to have VIA2 in a different place to
* the MacII (+1A000 rather than +4000)
* CSA: see http://developer.apple.com/technotes/hw/hw_09.html
@@ -707,7 +707,7 @@ static struct mac_model mac_data_table[] = {
* All of these probably have onboard SONIC in the Dock which
* means we'll have to probe for it eventually.
*
- * Are these reallly MAC_VIA_IIci? The developer notes for the
+ * Are these really MAC_VIA_IIci? The developer notes for the
* Duos show pretty much the same custom parts as in most of
* the other PowerBooks which would imply MAC_VIA_QUADRA.
*/
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 0cea21f58192..5b2799eb96a6 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -100,7 +100,7 @@
* finished; this function moves the message state to MSG_COMPLETE and signals
* the IOP. This two-step process is provided to allow the handler to defer
* message processing to a bottom-half handler if the processing will take
- * a signifigant amount of time (handlers are called at interrupt time so they
+ * a significant amount of time (handlers are called at interrupt time so they
* should execute quickly.)
*/
@@ -120,7 +120,7 @@
/*#define DEBUG_IOP*/
-/* Set to nonezero if the IOPs are present. Set by iop_init() */
+/* Set to non-zero if the IOPs are present. Set by iop_init() */
int iop_scc_present,iop_ism_present;
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index d7be16917efd..50603d3dce84 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -8,7 +8,7 @@
*
* 990502 (jmt) - Major rewrite for new interrupt architecture as well as some
* recent insights into OSS operational details.
- * 990610 (jmt) - Now taking fulll advantage of the OSS. Interrupts are mapped
+ * 990610 (jmt) - Now taking full advantage of the OSS. Interrupts are mapped
* to mostly match the A/UX interrupt scheme supported on the
* VIA side. Also added support for enabling the ISM irq again
* since we now have a functional IOP manager.
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index d5cac72eb3db..8df270e950fa 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -1,7 +1,7 @@
/*
* 6522 Versatile Interface Adapter (VIA)
*
- * There are two of these on the Mac II. Some IRQ's are vectored
+ * There are two of these on the Mac II. Some IRQs are vectored
* via them as are assorted bits and bobs - eg RTC, ADB.
*
* CSA: Motorola seems to have removed documentation on the 6522 from
diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c
index 87b4f0158560..b1033ae0d6f0 100644
--- a/arch/m68k/math-emu/fp_log.c
+++ b/arch/m68k/math-emu/fp_log.c
@@ -65,7 +65,7 @@ fp_fsqrt(struct fp_ext *dest, struct fp_ext *src)
fp_copy_ext(&src2, dest);
/*
- * The taylor row arround a for sqrt(x) is:
+ * The taylor row around a for sqrt(x) is:
* sqrt(x) = sqrt(a) + 1/(2*sqrt(a))*(x-a) + R
* With a=1 this gives:
* sqrt(x) = 1 + 1/2*(x-1)
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index ad3ed1fb8879..46161cef08b9 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -184,7 +184,7 @@ static struct IRQ_TABLE eirqs[] = {
};
/* complain only this many times about spurious ints : */
-static int ccleirq=60; /* ISA dev IRQ's*/
+static int ccleirq=60; /* ISA dev IRQs*/
/*static int cclirq=60;*/ /* internal */
/* FIXME: add shared ints,mask,unmask,probing.... */
@@ -234,7 +234,7 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
* There is a little mess wrt which IRQ really caused this irq request. The
* main problem is that IIRQ_REG and EIRQ_REG reflect the state when they
* are read - which is long after the request came in. In theory IRQs should
- * not just go away but they occassionally do
+ * not just go away but they occasionally do
*/
if (irq > 4 && irq <= 15 && mext_disabled) {
/*aliased_irq++;*/
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 7a0e3a220687..fb0f6a20cc3c 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -239,7 +239,7 @@ void clear_context(unsigned long context)
/* gets an empty context. if full, kills the next context listed to
die first */
/* This context invalidation scheme is, well, totally arbitrary, I'm
- sure it could be much more intellegent... but it gets the job done
+ sure it could be much more intelligent... but it gets the job done
for now without much overhead in making it's decision. */
/* todo: come up with optimized scheme for flushing contexts */
unsigned long get_free_context(struct mm_struct *mm)
diff --git a/arch/m68k/tools/amiga/dmesg.c b/arch/m68k/tools/amiga/dmesg.c
index e892748e7386..7340f5b6cf6d 100644
--- a/arch/m68k/tools/amiga/dmesg.c
+++ b/arch/m68k/tools/amiga/dmesg.c
@@ -3,7 +3,7 @@
* in Chip RAM with the kernel command
* line option `debug=mem'.
*
- * © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org>
+ * © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org>
*
*
* Usage:
diff --git a/arch/m68knommu/platform/5307/pit.c b/arch/m68knommu/platform/5307/pit.c
index e53c446d10e4..f18352fa35a6 100644
--- a/arch/m68knommu/platform/5307/pit.c
+++ b/arch/m68knommu/platform/5307/pit.c
@@ -83,7 +83,7 @@ unsigned long coldfire_pit_offset(void)
/*
* If we are still in the first half of the upcount and a
- * timer interupt is pending, then add on a ticks worth of time.
+ * timer interrupt is pending, then add on a ticks worth of time.
*/
offset = ((pmr - pcntr) * (1000000 / HZ)) / pmr;
if ((offset < (1000000 / HZ / 2)) && (*ipr & MCFPIT_IMR_IBIT))
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4dc142d394a3..3ecff5e9e4f3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1812,7 +1812,7 @@ config KEXEC
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
- The name comes from the similiarity to the exec system call.
+ The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index cb0801437b66..e7ed0ac48537 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -381,7 +381,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
return e;
}
-/* Put in dbe list if neccessary. */
+/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
diff --git a/arch/mips/pci/pci-excite.c b/arch/mips/pci/pci-excite.c
index 3c86c77cb74f..8a56876afcc6 100644
--- a/arch/mips/pci/pci-excite.c
+++ b/arch/mips/pci/pci-excite.c
@@ -131,7 +131,7 @@ static int __init basler_excite_pci_setup(void)
ocd_writel(0x00000000, bar + 0x100);
}
- /* Finally, enable the PCI interupt */
+ /* Finally, enable the PCI interrupt */
#if USB_IRQ > 7
set_c0_intcontrol(1 << USB_IRQ);
#else
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c
index 39bb15f1f2a6..4df070f2ff5d 100644
--- a/arch/mips/sni/pcimt.c
+++ b/arch/mips/sni/pcimt.c
@@ -246,7 +246,7 @@ static void pcimt_hwint1(void)
/*
* Note: ASIC PCI's builtin interrupt achknowledge feature is
* broken. Using it may result in loss of some or all i8259
- * interupts, so don't use PCIMT_INT_ACKNOWLEDGE ...
+ * interrupts, so don't use PCIMT_INT_ACKNOWLEDGE ...
*/
irq = i8259_irq();
if (unlikely(irq < 0))
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 8b4a4ee85eca..f1ee0b3f78f2 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -113,7 +113,7 @@ setup_604_hid0:
* around #3 and with the same fix we use. We may want to
* check if the CPU is using 60x bus mode in which case
* the workaround for errata #4 is useless. Also, we may
- * want to explicitely clear HID0_NOPDST as this is not
+ * want to explicitly clear HID0_NOPDST as this is not
* needed once we have applied workaround #5 (though it's
* not set by Apple's firmware at least).
*/
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2250f9e6c5ca..b0e5deb4274f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -491,7 +491,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
- * explicitely change them
+ * explicitly change them
*/
ops->map(host, i, i);
}
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 858f28ac8a06..2a2f3c3f6d80 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -1,6 +1,6 @@
/*
L2CR functions
- Copyright © 1997-1998 by PowerLogix R & D, Inc.
+ Copyright © 1997-1998 by PowerLogix R & D, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 229d355ed86a..ea22cad2cd0a 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -120,7 +120,7 @@ config PPC_PMI
depends on PPC_IBM_CELL_BLADE
help
PMI (Platform Management Interrupt) is a way to
- communicate with the BMC (Baseboard Mangement Controller).
+ communicate with the BMC (Baseboard Management Controller).
It is used in some IBM Cell blades.
default m
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d72b16d6816e..d9e56a503795 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -748,7 +748,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
if (count)
goto out;
- /* write aѕ much as possible */
+ /* write as much as possible */
for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
int ret;
ret = __get_user(wbox_data, udata);
diff --git a/arch/powerpc/platforms/celleb/scc_uhc.c b/arch/powerpc/platforms/celleb/scc_uhc.c
index a7c548bde2e3..b59c38a06e3e 100644
--- a/arch/powerpc/platforms/celleb/scc_uhc.c
+++ b/arch/powerpc/platforms/celleb/scc_uhc.c
@@ -36,7 +36,7 @@ static inline int uhc_clkctrl_ready(u32 val)
}
/*
- * UHC(usb host controler) enable function.
+ * UHC(usb host controller) enable function.
* affect to both of OHCI and EHCI core module.
*/
static void enable_scc_uhc(struct pci_dev *dev)
diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c
index 388bb711f1b0..b4d9534d2b0e 100644
--- a/arch/sh64/kernel/pci_sh5.c
+++ b/arch/sh64/kernel/pci_sh5.c
@@ -480,7 +480,7 @@ static int __init pcibios_init(void)
return -EINVAL;
}
- /* The pci subsytem needs to know where memory is and how much
+ /* The pci subsystem needs to know where memory is and how much
* of it there is. I've simply made these globals. A better mechanism
* is probably needed.
*/
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index d8925d285573..dd1689b814cb 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -3,7 +3,7 @@ config DEFCONFIG_LIST
option defconfig_list
default "arch/$ARCH/defconfig"
-# UML uses the generic IRQ sugsystem
+# UML uses the generic IRQ subsystem
config GENERIC_HARDIRQS
bool
default y
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 76fe0b0da996..83bf15a3dda8 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -35,7 +35,7 @@ static void line_timer_cb(struct work_struct *work)
/*
* Returns the free space inside the ring buffer of this line.
*
- * Should be called while holding line->lock (this does not modify datas).
+ * Should be called while holding line->lock (this does not modify data).
*/
static int write_room(struct line *line)
{
diff --git a/arch/um/drivers/null.c b/arch/um/drivers/null.c
index 21ad3d7932b3..2b45a1446c86 100644
--- a/arch/um/drivers/null.c
+++ b/arch/um/drivers/null.c
@@ -9,7 +9,7 @@
#include "chan_user.h"
#include "os.h"
-/* This address is used only as a unique identifer */
+/* This address is used only as a unique identifier */
static int null_chan;
static void *null_init(char *str, int device, const struct chan_opts *opts)
diff --git a/arch/um/drivers/stderr_console.c b/arch/um/drivers/stderr_console.c
index 4739dd527b43..d07a97f8b994 100644
--- a/arch/um/drivers/stderr_console.c
+++ b/arch/um/drivers/stderr_console.c
@@ -8,7 +8,7 @@
/* trivial console driver -- simply dump everything to stderr */
/*
- * Don't register by default -- as this registeres very early in the
+ * Don't register by default -- as this registers very early in the
* boot process it becomes the default console.
*
* Initialized at init time.
diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
index 13aa115cd1b4..734f873cab12 100644
--- a/arch/um/kernel/gmon_syms.c
+++ b/arch/um/kernel/gmon_syms.c
@@ -12,8 +12,8 @@ EXPORT_SYMBOL(__bb_init_func);
* versions in libgcov.
*
* Since SuSE backported the fix, we cannot handle it depending on GCC version.
- * So, unconditinally export it. But also give it a weak declaration, which will
- * be overriden by any other one.
+ * So, unconditionally export it. But also give it a weak declaration, which will
+ * be overridden by any other one.
*/
extern void __gcov_init(void *) __attribute__((weak));
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 277fce17b088..70c2d625b070 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -326,7 +326,7 @@ int deactivate_all_fds(void)
}
/*
- * do_IRQ handles all normal device IRQ's (the special
+ * do_IRQ handles all normal device IRQs (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index a0eba0833068..47b57b497d55 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -237,7 +237,7 @@ void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
/* User-mode eip? */
info.si_addr = UPT_IS_USER(regs) ? (void __user *) UPT_IP(regs) : NULL;
- /* Send us the fakey SIGTRAP */
+ /* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
diff --git a/arch/um/sys-i386/bug.c b/arch/um/sys-i386/bug.c
index 200c8ba2879b..a4360b5207db 100644
--- a/arch/um/sys-i386/bug.c
+++ b/arch/um/sys-i386/bug.c
@@ -6,7 +6,7 @@
#include <linux/uaccess.h>
/* Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
- * that's not relevent in skas mode.
+ * that's not relevant in skas mode.
*/
int is_valid_bugaddr(unsigned long eip)
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
index b02266ab5c55..fcaff86b000c 100644
--- a/arch/um/sys-i386/tls.c
+++ b/arch/um/sys-i386/tls.c
@@ -45,7 +45,7 @@ int do_get_thread_area(struct user_desc *info)
* XXX: Consider leaving one free slot for glibc usage at first place. This must
* be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
*
- * Also, this must be tested when compiling in SKAS mode with dinamic linking
+ * Also, this must be tested when compiling in SKAS mode with dynamic linking
* and running against NPTL.
*/
static int get_free_idx(struct task_struct* task)
diff --git a/arch/um/sys-x86_64/bug.c b/arch/um/sys-x86_64/bug.c
index 200c8ba2879b..a4360b5207db 100644
--- a/arch/um/sys-x86_64/bug.c
+++ b/arch/um/sys-x86_64/bug.c
@@ -6,7 +6,7 @@
#include <linux/uaccess.h>
/* Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
- * that's not relevent in skas mode.
+ * that's not relevant in skas mode.
*/
int is_valid_bugaddr(unsigned long eip)
diff --git a/arch/v850/kernel/me2.c b/arch/v850/kernel/me2.c
index 38be5c194f6b..007115dc9ce0 100644
--- a/arch/v850/kernel/me2.c
+++ b/arch/v850/kernel/me2.c
@@ -58,13 +58,13 @@ void __init me2_init_irqs (void)
void me2_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
{
if (chan == 0) {
- /* Specify that the relevent pins on the chip should do
+ /* Specify that the relevant pins on the chip should do
serial I/O, not direct I/O. */
ME2_PORT1_PMC |= 0xC;
/* Specify that we're using the UART, not the CSI device. */
ME2_PORT1_PFC |= 0xC;
} else if (chan == 1) {
- /* Specify that the relevent pins on the chip should do
+ /* Specify that the relevant pins on the chip should do
serial I/O, not direct I/O. */
ME2_PORT2_PMC |= 0x6;
/* Specify that we're using the UART, not the CSI device. */
diff --git a/arch/v850/kernel/rte_mb_a_pci.c b/arch/v850/kernel/rte_mb_a_pci.c
index 35a4bd5515cb..7165478824e7 100644
--- a/arch/v850/kernel/rte_mb_a_pci.c
+++ b/arch/v850/kernel/rte_mb_a_pci.c
@@ -179,7 +179,7 @@ static int __devinit pcibios_init (void)
default uses. */
/* Significant address bits used for decoding PCI GCS5 space
- accessess. */
+ accesses. */
MB_A_PCI_DMRR = ~(MB_A_PCI_MEM_SIZE - 1);
/* I don't understand this, but the SolutionGear example code
@@ -775,7 +775,7 @@ pci_alloc_consistent (struct pci_dev *pdev, size_t size, dma_addr_t *dma_addr)
/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
be values that were returned from pci_alloc_consistent. SIZE must be
the same as what as passed into pci_alloc_consistent. References to
- the memory and mappings assosciated with CPU_ADDR or DMA_ADDR past
+ the memory and mappings associated with CPU_ADDR or DMA_ADDR past
this call are illegal. */
void
pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr,
diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c
index b28505c544c9..1dc1e19c0a9f 100644
--- a/arch/x86/boot/compressed/misc_32.c
+++ b/arch/x86/boot/compressed/misc_32.c
@@ -25,7 +25,7 @@
/*
* Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
* Background information:
*
* The file layout is:
@@ -94,7 +94,7 @@
* Adding 32768 instead of 32767 just makes for round numbers.
* Adding the decompressor_size is necessary as it musht live after all
* of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
*
*/
diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c
index f932b0e89096..6ea015aa65e4 100644
--- a/arch/x86/boot/compressed/misc_64.c
+++ b/arch/x86/boot/compressed/misc_64.c
@@ -25,7 +25,7 @@
/*
* Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
* Background information:
*
* The file layout is:
@@ -94,7 +94,7 @@
* Adding 32768 instead of 32767 just makes for round numbers.
* Adding the decompressor_size is necessary as it musht live after all
* of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
*
*/
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index 5027650eb273..55822d2cf053 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -5,10 +5,6 @@
* This tricks binfmt_elf.c into loading 32bit binaries using lots
* of ugly preprocessor tricks. Talk about very very poor man's inheritance.
*/
-#define __ASM_X86_64_ELF_H 1
-
-#undef ELF_CLASS
-#define ELF_CLASS ELFCLASS32
#include <linux/types.h>
#include <linux/stddef.h>
@@ -19,6 +15,7 @@
#include <linux/binfmts.h>
#include <linux/mm.h>
#include <linux/security.h>
+#include <linux/elfcore-compat.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
@@ -31,6 +28,20 @@
#include <asm/ia32.h>
#include <asm/vsyscall32.h>
+#undef ELF_ARCH
+#undef ELF_CLASS
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_386
+
+#undef elfhdr
+#undef elf_phdr
+#undef elf_note
+#undef elf_addr_t
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_note elf32_note
+#define elf_addr_t Elf32_Off
+
#define ELF_NAME "elf/i386"
#define AT_SYSINFO 32
@@ -48,74 +59,20 @@ int sysctl_vsyscall32 = 1;
} while(0)
struct file;
-struct elf_phdr;
#define IA32_EMULATOR 1
-#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
-
-#undef ELF_ARCH
-#define ELF_ARCH EM_386
-
-#define ELF_DATA ELFDATA2LSB
+#undef ELF_ET_DYN_BASE
-#define USE_ELF_CORE_DUMP 1
-
-/* Override elfcore.h */
-#define _LINUX_ELFCORE_H 1
-typedef unsigned int elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-struct elf_siginfo
-{
- int si_signo; /* signal number */
- int si_code; /* extra code */
- int si_errno; /* errno */
-};
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
-struct elf_prstatus
-{
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- unsigned int pr_sigpend; /* Set of pending signals */
- unsigned int pr_sighold; /* Set of held signals */
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
- struct compat_timeval pr_cutime; /* Cumulative user time */
- struct compat_timeval pr_cstime; /* Cumulative system time */
- elf_gregset_t pr_reg; /* GP registers */
- int pr_fpvalid; /* True if math co-processor being used. */
-};
-
-#define ELF_PRARGSZ (80) /* Number of chars for args */
-
-struct elf_prpsinfo
-{
- char pr_state; /* numeric process state */
- char pr_sname; /* char for pr_state */
- char pr_zomb; /* zombie */
- char pr_nice; /* nice val */
- unsigned int pr_flag; /* flags */
- __u16 pr_uid;
- __u16 pr_gid;
- pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
- /* Lots missing */
- char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
#define _GET_SEG(x) \
({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
/* Assumes current==process to be dumped */
+#undef ELF_CORE_COPY_REGS
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
pr_reg[0] = regs->rbx; \
pr_reg[1] = regs->rcx; \
@@ -135,36 +92,41 @@ struct elf_prpsinfo
pr_reg[15] = regs->rsp; \
pr_reg[16] = regs->ss;
-#define user user32
+
+#define elf_prstatus compat_elf_prstatus
+#define elf_prpsinfo compat_elf_prpsinfo
+#define elf_fpregset_t struct user_i387_ia32_struct
+#define elf_fpxregset_t struct user32_fxsr_struct
+#define user user32
#undef elf_read_implies_exec
#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
-//#include <asm/ia32.h>
-#include <linux/elf.h>
-
-typedef struct user_i387_ia32_struct elf_fpregset_t;
-typedef struct user32_fxsr_struct elf_fpxregset_t;
-
-static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+#define elf_core_copy_regs elf32_core_copy_regs
+static inline void elf32_core_copy_regs(compat_elf_gregset_t *elfregs,
+ struct pt_regs *regs)
{
- ELF_CORE_COPY_REGS((*elfregs), regs)
+ ELF_CORE_COPY_REGS((&elfregs->ebx), regs)
}
-static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
+#define elf_core_copy_task_regs elf32_core_copy_task_regs
+static inline int elf32_core_copy_task_regs(struct task_struct *t,
+ compat_elf_gregset_t* elfregs)
{
struct pt_regs *pp = task_pt_regs(t);
- ELF_CORE_COPY_REGS((*elfregs), pp);
+ ELF_CORE_COPY_REGS((&elfregs->ebx), pp);
/* fix wrong segments */
- (*elfregs)[7] = t->thread.ds;
- (*elfregs)[9] = t->thread.fsindex;
- (*elfregs)[10] = t->thread.gsindex;
- (*elfregs)[8] = t->thread.es;
+ elfregs->ds = t->thread.ds;
+ elfregs->fs = t->thread.fsindex;
+ elfregs->gs = t->thread.gsindex;
+ elfregs->es = t->thread.es;
return 1;
}
+#define elf_core_copy_task_fpregs elf32_core_copy_task_fpregs
static inline int
-elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
+elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs,
+ elf_fpregset_t *fpu)
{
struct _fpstate_ia32 *fpstate = (void*)fpu;
mm_segment_t oldfs = get_fs();
@@ -186,8 +148,9 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
#define ELF_CORE_COPY_XFPREGS 1
#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
+#define elf_core_copy_task_xfpregs elf32_core_copy_task_xfpregs
static inline int
-elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
+elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
{
struct pt_regs *regs = task_pt_regs(t);
if (!tsk_used_math(t))
@@ -206,6 +169,10 @@ elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
extern int force_personality32;
+#undef ELF_EXEC_PAGESIZE
+#undef ELF_HWCAP
+#undef ELF_PLATFORM
+#undef SET_PERSONALITY
#define ELF_EXEC_PAGESIZE PAGE_SIZE
#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
#define ELF_PLATFORM ("i686")
@@ -231,6 +198,7 @@ do { \
#define load_elf_binary load_elf32_binary
+#undef ELF_PLAT_INIT
#define ELF_PLAT_INIT(r, load_addr) elf32_init(r)
#undef start_thread
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a3fa11f8f460..ccea590bbb92 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-extra-y := head_32.o init_task_32.o vmlinux.lds
+extra-y := head_32.o init_task.o vmlinux.lds
obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
@@ -17,6 +17,7 @@ obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_MICROCODE) += microcode.o
+obj-$(CONFIG_PCI) += early-quirks.o
obj-$(CONFIG_APM) += apm_32.o
obj-$(CONFIG_X86_SMP) += smp_32.o smpboot_32.o tsc_sync.o
obj-$(CONFIG_SMP) += smpcommon_32.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 43da66213a47..dec06e769281 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-extra-y := head_64.o head64.o init_task_64.o vmlinux.lds
+extra-y := head_64.o head64.o init_task.o vmlinux.lds
EXTRA_AFLAGS := -traditional
obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
@@ -39,7 +39,7 @@ obj-$(CONFIG_K8_NB) += k8.o
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_MODULES) += module_64.o
-obj-$(CONFIG_PCI) += early-quirks_64.o
+obj-$(CONFIG_PCI) += early-quirks.o
obj-y += topology.o
obj-y += intel_cacheinfo.o
diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32
index a4852a2e9190..045dd54b33e0 100644
--- a/arch/x86/kernel/acpi/Makefile_32
+++ b/arch/x86/kernel/acpi/Makefile_32
@@ -1,7 +1,4 @@
obj-$(CONFIG_ACPI) += boot.o
-ifneq ($(CONFIG_PCI),)
-obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o
-endif
obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index afd2afe9102d..289247d974c6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -99,7 +99,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
/*
* The default interrupt routing model is PIC (8259). This gets
- * overriden if IOAPICs are enumerated (below).
+ * overridden if IOAPICs are enumerated (below).
*/
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
@@ -414,8 +414,8 @@ acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end
*
* Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
* for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
- * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
- * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
+ * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
+ * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
*/
void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
@@ -427,7 +427,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
old = inb(0x4d0) | (inb(0x4d1) << 8);
/*
- * If we use ACPI to set PCI irq's, then we should clear ELCR
+ * If we use ACPI to set PCI IRQs, then we should clear ELCR
* since we will set it correctly as we enable the PCI irq
* routing.
*/
@@ -555,7 +555,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
int acpi_unmap_lsapic(int cpu)
{
- x86_cpu_to_apicid[cpu] = -1;
+ per_cpu(x86_cpu_to_apicid, cpu) = -1;
cpu_clear(cpu, cpu_present_map);
num_processors--;
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 2d39f55d29a8..10b67170b133 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -29,7 +29,7 @@
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
unsigned int cpu)
{
- struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
flags->bm_check = 0;
if (num_online_cpus() == 1)
@@ -72,7 +72,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
struct cstate_entry *percpu_entry;
- struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
cpumask_t saved_mask;
int retval;
diff --git a/arch/x86/kernel/acpi/earlyquirk_32.c b/arch/x86/kernel/acpi/earlyquirk_32.c
deleted file mode 100644
index 23f78efc577d..000000000000
--- a/arch/x86/kernel/acpi/earlyquirk_32.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Do early PCI probing for bug detection when the main PCI subsystem is
- * not up yet.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-
-#include <asm/pci-direct.h>
-#include <asm/acpi.h>
-#include <asm/apic.h>
-
-#ifdef CONFIG_ACPI
-
-static int __init nvidia_hpet_check(struct acpi_table_header *header)
-{
- return 0;
-}
-#endif
-
-static int __init check_bridge(int vendor, int device)
-{
-#ifdef CONFIG_ACPI
- static int warned;
- /* According to Nvidia all timer overrides are bogus unless HPET
- is enabled. */
- if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
- if (!warned && acpi_table_parse(ACPI_SIG_HPET,
- nvidia_hpet_check)) {
- warned = 1;
- acpi_skip_timer_override = 1;
- printk(KERN_INFO "Nvidia board "
- "detected. Ignoring ACPI "
- "timer override.\n");
- printk(KERN_INFO "If you got timer trouble "
- "try acpi_use_timer_override\n");
-
- }
- }
-#endif
- if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
- timer_over_8254 = 0;
- printk(KERN_INFO "ATI board detected. Disabling timer routing "
- "over 8254.\n");
- }
- return 0;
-}
-
-void __init check_acpi_pci(void)
-{
- int num, slot, func;
-
- /* Assume the machine supports type 1. If not it will
- always read ffffffff and should not have any side effect.
- Actually a few buggy systems can machine check. Allow the user
- to disable it by command line option at least -AK */
- if (!early_pci_allowed())
- return;
-
- /* Poor man's PCI discovery */
- for (num = 0; num < 32; num++) {
- for (slot = 0; slot < 32; slot++) {
- for (func = 0; func < 8; func++) {
- u32 class;
- u32 vendor;
- class = read_pci_config(num, slot, func,
- PCI_CLASS_REVISION);
- if (class == 0xffffffff)
- break;
-
- if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
- continue;
-
- vendor = read_pci_config(num, slot, func,
- PCI_VENDOR_ID);
-
- if (check_bridge(vendor & 0xffff, vendor >> 16))
- return;
- }
-
- }
- }
-}
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index b54fded49834..2ed0a4ce62f0 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -63,7 +63,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
- struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3bd2688bd443..d6405e0842b5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -357,14 +357,14 @@ void alternatives_smp_switch(int smp)
if (smp) {
printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
- clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+ clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
list_for_each_entry(mod, &smp_alt_modules, next)
alternatives_smp_lock(mod->locks, mod->locks_end,
mod->text, mod->text_end);
} else {
printk(KERN_INFO "SMP alternatives: switching to UP code\n");
set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
- set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+ set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
list_for_each_entry(mod, &smp_alt_modules, next)
alternatives_smp_unlock(mod->locks, mod->locks_end,
mod->text, mod->text_end);
@@ -432,7 +432,7 @@ void __init alternative_instructions(void)
if (1 == num_possible_cpus()) {
printk(KERN_INFO "SMP alternatives: switching to UP code\n");
set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
- set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+ set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
alternatives_smp_unlock(__smp_locks, __smp_locks_end,
_text, _etext);
}
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 793341fffc81..08b07c176962 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -947,7 +947,7 @@ void __devinit setup_local_APIC(void)
* Set up LVT0, LVT1:
*
* set up through-local-APIC on the BP's LINT0. This is not
- * strictly necessery in pure symmetric-IO mode, but sometimes
+ * strictly necessary in pure symmetric-IO mode, but sometimes
* we delegate interrupts to the 8259A.
*/
/*
@@ -998,7 +998,7 @@ void __devinit setup_local_APIC(void)
} else {
if (esr_disable)
/*
- * Something untraceble is creating bad interrupts on
+ * Something untraceable is creating bad interrupts on
* secondary quads ... for the moment, just leave the
* ESR disabled - we can't do anything useful with the
* errors anyway - mbligh
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 32f2365c26ed..17089a041028 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -57,7 +57,7 @@
* screen-blanking and gpm (Stephen Rothwell); Linux 1.99.4
* 1.2a:Simple change to stop mysterious bug reports with SMP also added
* levels to the printk calls. APM is not defined for SMP machines.
- * The new replacment for it is, but Linux doesn't yet support this.
+ * The new replacement for it is, but Linux doesn't yet support this.
* Alan Cox Linux 2.1.55
* 1.3: Set up a valid data descriptor 0x40 for buggy BIOS's
* 1.4: Upgraded to support APM 1.2. Integrated ThinkPad suspend patch by
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5f8af875f457..1ff88c7f45cf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -266,7 +266,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_HT
/*
* On a AMD multi core setup the lower bits of the APIC id
- * distingush the cores.
+ * distinguish the cores.
*/
if (c->x86_max_cores > 1) {
int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 473eac883c7b..9681fa15ddf0 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -53,7 +53,7 @@ static u32 __cpuinit ramtop(void) /* 16388 */
continue;
/*
* Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophy already
+ * we frob around that catastrophe already
*/
if (e820.map[i].type == E820_RESERVED)
@@ -287,7 +287,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
c->x86_capability[5] = cpuid_edx(0xC0000001);
}
- /* Cyrix III family needs CX8 & PGE explicity enabled. */
+ /* Cyrix III family needs CX8 & PGE explicitly enabled. */
if (c->x86_model >=6 && c->x86_model <= 9) {
rdmsr (MSR_VIA_FCR, lo, hi);
lo |= (1<<1 | 1<<7);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d506201d397c..e2fcf2051bdb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -207,7 +207,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
static int __init x86_fxsr_setup(char * s)
{
- /* Tell all the other CPU's to not use it... */
+ /* Tell all the other CPUs to not use it... */
disable_x86_fxsr = 1;
/*
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_32
index d8c6f132dc7a..d8c6f132dc7a 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_32
diff --git a/arch/x86/kernel/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_64
index a3fd51926cbd..9c9699fdcf52 100644
--- a/arch/x86/kernel/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_64
@@ -19,7 +19,7 @@ config X86_POWERNOW_K8
To compile this driver as a module, choose M here: the
module will be called powernow-k8.
- For details, take a look at <file:Documentation/cpu-freq/>.
+ For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 2ca43ba32bc0..fea0af0476b9 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -77,7 +77,7 @@ static unsigned int acpi_pstate_strict;
static int check_est_cpu(unsigned int cpuid)
{
- struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
+ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
if (cpu->x86_vendor != X86_VENDOR_INTEL ||
!cpu_has(cpu, X86_FEATURE_EST))
@@ -560,7 +560,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
unsigned int cpu = policy->cpu;
struct acpi_cpufreq_data *data;
unsigned int result = 0;
- struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 32f0bda3fc95..f03e9153618e 100644
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -260,7 +260,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
freqs.old = nforce2_get(policy->cpu);
freqs.new = target_fsb * fid * 100;
- freqs.cpu = 0; /* Only one CPU on nForce2 plattforms */
+ freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
if (freqs.old == freqs.new)
return 0;
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c11baaf9f2b4..326a4c81f684 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -305,7 +305,7 @@ static struct cpufreq_driver eps_driver = {
static int __init eps_init(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
/* This driver will work only on Centaur C7 processors with
* Enhanced SpeedStep/PowerSaver registers */
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index 1e7ae7dafcf6..94619c22f563 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -199,7 +199,7 @@ static int elanfreq_target (struct cpufreq_policy *policy,
static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int i;
int result;
@@ -280,7 +280,7 @@ static struct cpufreq_driver elanfreq_driver = {
static int __init elanfreq_init(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
/* Test if we have the right hardware */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
index ed2bda127c44..2ed7db2fd257 100644
--- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
@@ -12,12 +12,12 @@
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
*
- * Theoritical note:
+ * Theoretical note:
*
* (see Geode(tm) CS5530 manual (rev.4.1) page.56)
*
* CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
- * are based on Suspend Moduration.
+ * are based on Suspend Modulation.
*
* Suspend Modulation works by asserting and de-asserting the SUSP# pin
* to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
@@ -101,11 +101,11 @@
/* SUSCFG bits */
#define SUSMOD (1<<0) /* enable/disable suspend modulation */
-/* the belows support only with cs5530 (after rev.1.2)/cs5530A */
+/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
/* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
-/* the belows support only with cs5530A */
+/* the below is supported only with cs5530A */
#define PWRSVE_ISA (1<<3) /* stop ISA clock */
#define PWRSVE (1<<4) /* active idle */
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 5045f5d583c8..749d00cb2ebd 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void)
static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
char *cpuname=NULL;
int ret;
u32 lo, hi;
@@ -959,7 +959,7 @@ static struct cpufreq_driver longhaul_driver = {
static int __init longhaul_init(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index b2689514295a..af4a867a097c 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -172,7 +172,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
u32 save_lo, save_hi;
u32 eax, ebx, ecx, edx;
u32 try_hi;
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
if (!low_freq || !high_freq)
return -EINVAL;
@@ -298,7 +298,7 @@ static struct cpufreq_driver longrun_driver = {
*/
static int __init longrun_init(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
!cpu_has(c, X86_FEATURE_LONGRUN))
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 793eae854f4f..14791ec55cfd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -195,7 +195,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
- struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
int cpuid = 0;
unsigned int i;
@@ -279,7 +279,7 @@ static struct cpufreq_driver p4clockmod_driver = {
static int __init cpufreq_p4_init(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
int ret;
/*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index 6d0285339317..eb9b62b0830c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -1,6 +1,6 @@
/*
* This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
- * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski.
+ * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski.
*
* Licensed under the terms of the GNU GPL License version 2.
*
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
*/
static int __init powernow_k6_init(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
((c->x86_model != 12) && (c->x86_model != 13)))
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index f3686a5f2308..b5a9863d6cdc 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -114,7 +114,7 @@ static int check_fsb(unsigned int fsbspeed)
static int check_powernow(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int maxei, eax, ebx, ecx, edx;
if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c06ac680c9ca..9c36a53676b7 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -168,7 +168,7 @@ static void count_off_irt(struct powernow_k8_data *data)
return;
}
-/* the voltage stabalization time */
+/* the voltage stabilization time */
static void count_off_vst(struct powernow_k8_data *data)
{
udelay(data->vstable * VST_UNITS_20US);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index b06c812208ca..7c4f6e0faed4 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -148,10 +148,10 @@ struct powernow_k8_data {
#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
-#define VST_UNITS_20US 20 /* Voltage Stabalization Time is in units of 20us */
+#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */
/*
- * Most values of interest are enocoded in a single field of the _PSS
+ * Most values of interest are encoded in a single field of the _PSS
* entries: the "control" value.
*/
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
index d9f3e90a7ae0..42da9bd677d6 100644
--- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
@@ -102,7 +102,7 @@ static int sc520_freq_target (struct cpufreq_policy *policy,
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
int result;
/* capability check */
@@ -151,7 +151,7 @@ static struct cpufreq_driver sc520_freq_driver = {
static int __init sc520_freq_init(void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
int err;
/* Test if we have the right hardware */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 811d47438546..3031f1196192 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -230,7 +230,7 @@ static struct cpu_model models[] =
static int centrino_cpu_init_table(struct cpufreq_policy *policy)
{
- struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+ struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
struct cpu_model *model;
for(model = models; model->cpu_id != NULL; model++)
@@ -340,7 +340,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
- struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+ struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
unsigned freq;
unsigned l, h;
int ret;
@@ -612,7 +612,7 @@ static struct cpufreq_driver centrino_driver = {
*/
static int __init centrino_init(void)
{
- struct cpuinfo_x86 *cpu = cpu_data;
+ struct cpuinfo_x86 *cpu = &cpu_data(0);
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index b1acc8ce3167..76c3ab0da468 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
unsigned int speedstep_detect_processor (void)
{
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(0);
u32 ebx, msr_lo, msr_hi;
dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 122d2d75aa9f..88d66fb8411d 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -93,7 +93,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
ccr5 = getCx86(CX86_CCR5);
if (ccr5 & 2)
setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */
@@ -115,9 +115,9 @@ static void __cpuinit set_cx86_reorder(void)
printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- /* Load/Store Serialize to mem access disable (=reorder it)  */
+ /* Load/Store Serialize to mem access disable (=reorder it) */
setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0;
@@ -146,7 +146,7 @@ static void __cpuinit set_cx86_inc(void)
printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* PCR1 -- Performance Control */
/* Incrementor on, whatever that is */
setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
@@ -256,7 +256,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
u32 vendor, device;
/* It isn't really a PCI quirk directly, but the cure is the
same. The MediaGX has deep magic SMM stuff that handles the
- SB emulation. It thows away the fifo on disable_dma() which
+ SB emulation. It throws away the fifo on disable_dma() which
is wrong and ruins the audio.
Bug2: VSA1 has a wrap bug so that using maximum sized DMA
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 297a24116949..9921b01fe199 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -295,7 +295,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
- unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
+ unsigned int cpu = c->cpu_index;
#endif
if (c->cpuid_level > 3) {
@@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
if (new_l2) {
l2 = new_l2;
#ifdef CONFIG_X86_HT
- cpu_llc_id[cpu] = l2_id;
+ per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
}
if (new_l3) {
l3 = new_l3;
#ifdef CONFIG_X86_HT
- cpu_llc_id[cpu] = l3_id;
+ per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
}
@@ -459,7 +459,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
int index_msb, i;
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
this_leaf = CPUID4_INFO_IDX(cpu, index);
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -470,8 +470,8 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
index_msb = get_count_order(num_threads_sharing);
for_each_online_cpu(i) {
- if (c[i].apicid >> index_msb ==
- c[cpu].apicid >> index_msb) {
+ if (cpu_data(i).apicid >> index_msb ==
+ c->apicid >> index_msb) {
cpu_set(i, this_leaf->shared_cpu_map);
if (i != cpu && cpuid4_info[i]) {
sibling_leaf = CPUID4_INFO_IDX(i, index);
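cache_shared_cpu_map_setup() above decides which CPUs share a cache level by shifting away the low index_msb bits of each APIC ID and comparing what remains; CPUs with the same remainder sit under the same cache. A stand-alone sketch of that comparison with made-up APIC IDs and plain arrays instead of cpu_data():

/*
 * Two CPUs share the cache level when their APIC IDs agree once the
 * index_msb low bits (log2 of the number of sharing threads) are
 * shifted away.  Values are illustrative only.
 */
#include <stdio.h>

#define NR_CPUS 4

static unsigned int apicid[NR_CPUS] = { 0, 1, 4, 5 };	/* example IDs */

int main(void)
{
	int cpu = 0;
	int index_msb = 1;	/* 2 threads share this cache level */

	for (int i = 0; i < NR_CPUS; i++) {
		if (apicid[i] >> index_msb == apicid[cpu] >> index_msb)
			printf("cpu%d shares this cache with cpu%d\n", i, cpu);
	}
	return 0;
}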
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 2287d4863a8a..9964be3de2b7 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -147,10 +147,10 @@ static void prepare_set(void)
write_cr0(cr0);
wbinvd();
- /* Cyrix ARRs - everything else were excluded at the top */
+ /* Cyrix ARRs - everything else was excluded at the top */
ccr3 = getCx86(CX86_CCR3);
- /* Cyrix ARRs - everything else were excluded at the top */
+ /* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
}
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 56f64e34829f..992f08dfbb6c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -182,7 +182,7 @@ static inline void k8_enable_fixed_iorrs(void)
/**
* Checks and updates an fixed-range MTRR if it differs from the value it
- * should have. If K8 extenstions are wanted, update the K8 SYSCFG MSR also.
+ * should have. If K8 extensions are wanted, update the K8 SYSCFG MSR also.
* see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
* \param msr MSR address of the MTTR which should be checked and updated
* \param changed pointer which indicates whether the MTRR needed to be changed
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 5e4be30ff903..9abbdf7562c5 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -748,7 +748,7 @@ static int __init mtrr_init_finialize(void)
if (use_intel())
mtrr_state_warn();
else {
- /* The CPUs haven't MTRR and seemes not support SMP. They have
+ /* The CPUs don't have MTRR and seem not to support SMP. They have
* specific drivers, we use a tricky method to support
* suspend/resume for them.
* TBD: is there any system with such CPU which supports
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 54cdbf1a40f1..c02541e6e653 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -120,7 +120,9 @@ int reserve_perfctr_nmi(unsigned int msr)
unsigned int counter;
counter = nmi_perfctr_msr_to_bit(msr);
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+ /* register not managed by the allocator? */
+ if (counter > NMI_MAX_COUNTER_BITS)
+ return 1;
if (!test_and_set_bit(counter, perfctr_nmi_owner))
return 1;
@@ -132,7 +134,9 @@ void release_perfctr_nmi(unsigned int msr)
unsigned int counter;
counter = nmi_perfctr_msr_to_bit(msr);
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+ /* register not managed by the allocator? */
+ if (counter > NMI_MAX_COUNTER_BITS)
+ return;
clear_bit(counter, perfctr_nmi_owner);
}
@@ -142,7 +146,9 @@ int reserve_evntsel_nmi(unsigned int msr)
unsigned int counter;
counter = nmi_evntsel_msr_to_bit(msr);
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+ /* register not managed by the allocator? */
+ if (counter > NMI_MAX_COUNTER_BITS)
+ return 1;
if (!test_and_set_bit(counter, evntsel_nmi_owner))
return 1;
@@ -154,7 +160,9 @@ void release_evntsel_nmi(unsigned int msr)
unsigned int counter;
counter = nmi_evntsel_msr_to_bit(msr);
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+ /* register not managed by the allocator? */
+ if (counter > NMI_MAX_COUNTER_BITS)
+ return;
clear_bit(counter, evntsel_nmi_owner);
}
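The perfctr-watchdog hunks above replace BUG_ON() with a bounds check, so that an MSR the allocator does not manage is simply reported back to the caller instead of crashing the kernel. A small stand-alone sketch of that fail-soft reservation pattern, with a plain C bitmap standing in for test_and_set_bit() (sizes and names are illustrative):

/*
 * Reservation helper that refuses out-of-range registers instead of
 * crashing: "not managed here" is reported as a trivially successful
 * reservation, mirroring the return values in the hunks above.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_COUNTER_BITS 64
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long owner_bitmap[MAX_COUNTER_BITS / BITS_PER_WORD + 1];

static bool test_and_set(unsigned long *map, unsigned int bit)
{
	unsigned long mask = 1UL << (bit % BITS_PER_WORD);
	unsigned long *word = &map[bit / BITS_PER_WORD];
	bool was_set = *word & mask;

	*word |= mask;
	return was_set;
}

/* returns true if the caller may use the counter */
static bool reserve_counter(unsigned int bit)
{
	if (bit > MAX_COUNTER_BITS)
		return true;	/* register not managed by the allocator */
	return !test_and_set(owner_bitmap, bit);
}

int main(void)
{
	printf("%d %d\n", reserve_counter(3), reserve_counter(3)); /* 1 then 0 */
	return 0;
}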
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 879a0f789b1e..2d42b414b777 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -85,12 +85,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
/* nothing */
};
struct cpuinfo_x86 *c = v;
- int i, n = c - cpu_data;
+ int i, n = 0;
int fpu_exception;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
+ n = c->cpu_index;
#endif
seq_printf(m, "processor\t: %d\n"
"vendor_id\t: %s\n"
@@ -175,11 +176,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+ if (*pos == 0) /* just in case cpu 0 is not the first */
+ *pos = first_cpu(cpu_possible_map);
+ if ((*pos) < NR_CPUS && cpu_possible(*pos))
+ return &cpu_data(*pos);
+ return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
- ++*pos;
+ *pos = next_cpu(*pos, cpu_possible_map);
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
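The /proc/cpuinfo iterator above stops stepping a raw index from 0 to NR_CPUS-1 and instead follows the possible-CPU map, so sparse or non-zero-based CPU numbering still yields one record per possible CPU. A stand-alone sketch of that walk, using a plain bitmask instead of the kernel's cpumask API (the mask value is made up):

/*
 * Visit only the CPUs that can exist, in bit order, mirroring the new
 * c_start()/c_next() pair above.
 */
#include <stdio.h>

#define NR_CPUS 8

static unsigned int possible_mask = 0x0b;	/* CPUs 0, 1 and 3 exist */

static int next_possible(int from)
{
	for (int cpu = from; cpu < NR_CPUS; cpu++)
		if (possible_mask & (1u << cpu))
			return cpu;
	return NR_CPUS;				/* end of walk */
}

int main(void)
{
	for (int cpu = next_possible(0); cpu < NR_CPUS; cpu = next_possible(cpu + 1))
		printf("processor\t: %d\n", cpu);
	return 0;
}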
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 70dcf912d9fb..05c9936a16cc 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -114,7 +114,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
static int cpuid_open(struct inode *inode, struct file *file)
{
unsigned int cpu = iminor(file->f_path.dentry->d_inode);
- struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
if (cpu >= NR_CPUS || !cpu_online(cpu))
return -ENXIO; /* No such CPU */
@@ -134,15 +134,18 @@ static const struct file_operations cpuid_fops = {
.open = cpuid_open,
};
-static int __cpuinit cpuid_device_create(int i)
+static __cpuinit int cpuid_device_create(int cpu)
{
- int err = 0;
struct device *dev;
- dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i);
- if (IS_ERR(dev))
- err = PTR_ERR(dev);
- return err;
+ dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
+ "cpu%d", cpu);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+static void cpuid_device_destroy(int cpu)
+{
+ device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
}
static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
@@ -150,18 +153,21 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ int err = 0;
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpuid_device_create(cpu);
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ err = cpuid_device_create(cpu);
break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ cpuid_device_destroy(cpu);
break;
}
- return NOTIFY_OK;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
@@ -198,7 +204,7 @@ static int __init cpuid_init(void)
out_class:
i = 0;
for_each_online_cpu(i) {
- device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
+ cpuid_device_destroy(i);
}
class_destroy(cpuid_class);
out_chrdev:
@@ -212,7 +218,7 @@ static void __exit cpuid_exit(void)
int cpu = 0;
for_each_online_cpu(cpu)
- device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ cpuid_device_destroy(cpu);
class_destroy(cpuid_class);
unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
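The cpuid hotplug callback above now creates the device node at CPU_UP_PREPARE, so a creation failure can veto the bring-up via NOTIFY_BAD, and tears it down on CPU_UP_CANCELED as well as CPU_DEAD. A simplified sketch of that flow, with stub functions in place of device_create()/device_destroy() and plain enums in place of the kernel's notifier types:

/*
 * Create early enough that a failure can cancel the hotplug; destroy on
 * both the cancel and the dead paths.
 */
#include <stdio.h>

enum { CPU_UP_PREPARE, CPU_UP_CANCELED, CPU_DEAD };
enum { NOTIFY_OK, NOTIFY_BAD };

static int device_create_stub(int cpu)   { printf("create cpu%d\n", cpu); return 0; }
static void device_destroy_stub(int cpu) { printf("destroy cpu%d\n", cpu); }

static int cpu_callback(unsigned long action, int cpu)
{
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		err = device_create_stub(cpu);	/* may fail: veto the bring-up */
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		device_destroy_stub(cpu);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

int main(void)
{
	cpu_callback(CPU_UP_PREPARE, 1);
	cpu_callback(CPU_DEAD, 1);
	return 0;
}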
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index d58039e8de74..58fd54eb5577 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -706,7 +706,7 @@ void __init e820_register_memory(void)
int i;
/*
- * Search for the bigest gap in the low 32 bits of the e820
+ * Search for the biggest gap in the low 32 bits of the e820
* memory space.
*/
last = 0x100000000ull;
diff --git a/arch/x86/kernel/early-quirks_64.c b/arch/x86/kernel/early-quirks.c
index 13aa4fd728f3..dc34acbd54aa 100644
--- a/arch/x86/kernel/early-quirks_64.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -13,9 +13,13 @@
#include <linux/acpi.h>
#include <linux/pci_ids.h>
#include <asm/pci-direct.h>
-#include <asm/proto.h>
-#include <asm/iommu.h>
#include <asm/dma.h>
+#include <asm/io_apic.h>
+#include <asm/apic.h>
+
+#ifdef CONFIG_IOMMU
+#include <asm/iommu.h>
+#endif
static void __init via_bugs(void)
{
@@ -23,7 +27,8 @@ static void __init via_bugs(void)
if ((end_pfn > MAX_DMA32_PFN || force_iommu) &&
!iommu_aperture_allowed) {
printk(KERN_INFO
- "Looks like a VIA chipset. Disabling IOMMU. Override with iommu=allowed\n");
+ "Looks like a VIA chipset. Disabling IOMMU."
+ " Override with iommu=allowed\n");
iommu_aperture_disabled = 1;
}
#endif
@@ -40,6 +45,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header)
static void __init nvidia_bugs(void)
{
#ifdef CONFIG_ACPI
+#ifdef CONFIG_X86_IO_APIC
/*
* All timer overrides on Nvidia are
* wrong unless HPET is enabled.
@@ -59,17 +65,20 @@ static void __init nvidia_bugs(void)
"try acpi_use_timer_override\n");
}
#endif
+#endif
/* RED-PEN skip them on mptables too? */
}
static void __init ati_bugs(void)
{
+#ifdef CONFIG_X86_IO_APIC
if (timer_over_8254 == 1) {
timer_over_8254 = 0;
printk(KERN_INFO
- "ATI board detected. Disabling timer routing over 8254.\n");
+ "ATI board detected. Disabling timer routing over 8254.\n");
}
+#endif
}
struct chipset {
@@ -104,7 +113,7 @@ void __init early_quirks(void)
if (class == 0xffffffff)
break;
- if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
+ if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
continue;
vendor = read_pci_config(num, slot, func,
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 4ae03e3e8294..ce703e21c912 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -24,10 +24,19 @@
#include <acpi/acpi_bus.h>
#endif
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
+/*
+ * which logical CPU number maps to which CPU (physical APIC ID)
+ *
+ * The following static array is used during kernel startup
+ * and the x86_cpu_to_apicid_ptr contains the address of the
+ * array during this time. It is zeroed when the per_cpu
+ * data area is removed.
+ */
+u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
= { [0 ... NR_CPUS-1] = BAD_APICID };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
+void *x86_cpu_to_apicid_ptr;
+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
struct genapic __read_mostly *genapic = &apic_flat;
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 91c7526768ee..07352b74bda6 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -172,7 +172,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
*/
cpu = first_cpu(cpumask);
if ((unsigned)cpu < NR_CPUS)
- return x86_cpu_to_apicid[cpu];
+ return per_cpu(x86_cpu_to_apicid, cpu);
else
return BAD_APICID;
}
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a7eee0a4751d..6b3469311e42 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -58,7 +58,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
for (i = 0; i < IDT_ENTRIES; i++)
set_intr_gate(i, early_idt_handler);
- asm volatile("lidt %0" :: "m" (idt_descr));
+ load_idt((const struct desc_ptr *)&idt_descr);
early_printk("Kernel alive\n");
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f8367074da0d..53303f2e5475 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -69,12 +69,15 @@ static inline void hpet_clear_mapping(void)
* HPET command line enable / disable
*/
static int boot_hpet_disable;
+int hpet_force_user;
static int __init hpet_setup(char* str)
{
if (str) {
if (!strncmp("disable", str, 7))
boot_hpet_disable = 1;
+ if (!strncmp("force", str, 5))
+ hpet_force_user = 1;
}
return 1;
}
@@ -350,7 +353,7 @@ static int hpet_clocksource_register(void)
*
* hpet period is in femto seconds per cycle
* so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
+ * approximated by mult/2^shift
*
* fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
* fsec/cyc * 1ns/1000000fsec * 2^shift = mult
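The comment above describes how the HPET period (femtoseconds per cycle) is folded into the clocksource's mult/shift pair: mult = period_fs * 2^shift / 10^6, so that nanoseconds = (cycles * mult) >> shift. Worked out as plain arithmetic with a made-up period value:

/*
 * Convert an HPET period in fs/cycle into the mult used with a given
 * shift, then turn a cycle count back into nanoseconds.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period_fs = 69841279ULL;	/* ~69.84 ns per cycle (example) */
	unsigned int shift = 22;

	uint64_t mult = (period_fs << shift) / 1000000ULL;

	uint64_t cycles = 1000;
	uint64_t ns = (cycles * mult) >> shift;	/* ~69841 ns for this period */

	printf("mult=%llu, %llu cycles ~= %llu ns\n",
	       (unsigned long long)mult, (unsigned long long)cycles,
	       (unsigned long long)ns);
	return 0;
}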
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 5cc8841ca2c6..a42c80745325 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -86,7 +86,7 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
* On UP the PIT can serve all of the possible timer functions. On SMP systems
* it can be solely used for the global tick.
*
- * The profiling and update capabilites are switched off once the local apic is
+ * The profiling and update capabilities are switched off once the local apic is
* registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
* !using_apic_timer decisions in do_timer_interrupt_hook()
*/
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index d34a10cc13a7..f634fc715c99 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -403,7 +403,8 @@ void __init native_init_IRQ(void)
int vector = FIRST_EXTERNAL_VECTOR + i;
if (i >= NR_IRQS)
break;
- if (vector != SYSCALL_VECTOR)
+ /* SYSCALL_VECTOR was reserved in trap_init. */
+ if (!test_bit(vector, used_vectors))
set_intr_gate(vector, interrupt[i]);
}
diff --git a/arch/x86/kernel/init_task_32.c b/arch/x86/kernel/init_task.c
index d26fc063a760..468c9c437842 100644
--- a/arch/x86/kernel/init_task_32.c
+++ b/arch/x86/kernel/init_task.c
@@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
-
EXPORT_SYMBOL(init_mm);
/*
@@ -25,7 +24,7 @@ EXPORT_SYMBOL(init_mm);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
+union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
@@ -35,12 +34,14 @@ union thread_union init_thread_union
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
-
EXPORT_SYMBOL(init_task);
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's.
- */
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
diff --git a/arch/x86/kernel/init_task_64.c b/arch/x86/kernel/init_task_64.c
deleted file mode 100644
index 4ff33d4f8551..000000000000
--- a/arch/x86/kernel/init_task_64.c
+++ /dev/null
@@ -1,54 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial task structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-
-/* Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 5f10c7189534..f35c6eb33da9 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -584,7 +584,7 @@ tryanotherirq:
imbalance = move_this_load;
- /* For physical_balance case, we accumlated both load
+ /* For physical_balance case, we accumulated both load
* values in the one of the siblings cpu_irq[],
* to use the same code for physical and logical processors
* as much as possible.
@@ -1198,7 +1198,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
static int __assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
- int vector, offset, i;
+ int vector, offset;
BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
@@ -1215,11 +1215,8 @@ next:
}
if (vector == current_vector)
return -ENOSPC;
- if (vector == SYSCALL_VECTOR)
+ if (test_and_set_bit(vector, used_vectors))
goto next;
- for (i = 0; i < NR_IRQ_VECTORS; i++)
- if (irq_vector[i] == vector)
- goto next;
current_vector = vector;
current_offset = offset;
@@ -2295,6 +2292,12 @@ static inline void __init check_timer(void)
void __init setup_IO_APIC(void)
{
+ int i;
+
+ /* Reserve all the system vectors. */
+ for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+ set_bit(i, used_vectors);
+
enable_IO_APIC();
if (acpi_ioapic)
@@ -2472,7 +2475,7 @@ void destroy_irq(unsigned int irq)
}
/*
- * MSI mesage composition
+ * MSI message composition
*/
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
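The io_apic_32 hunks above introduce a used_vectors bitmap: the fixed system vectors are reserved once in setup_IO_APIC(), and __assign_irq_vector() claims a free vector with a single test-and-set instead of rescanning every irq_vector[] entry. A stand-alone sketch of that idea with a plain boolean array (the vector numbers below are illustrative, not the kernel's actual constants):

/*
 * Reserve the fixed system vectors up front, then hand out device
 * vectors from whatever is still free.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_VECTORS		256
#define FIRST_SYSTEM_VECTOR	0xef	/* illustrative value */

static bool used_vectors[NR_VECTORS];

static void reserve_system_vectors(void)
{
	for (int v = FIRST_SYSTEM_VECTOR; v < NR_VECTORS; v++)
		used_vectors[v] = true;
}

static int assign_vector(int start)
{
	for (int v = start; v < NR_VECTORS; v++) {
		if (used_vectors[v])
			continue;		/* already taken: keep looking */
		used_vectors[v] = true;
		return v;
	}
	return -1;				/* out of vectors */
}

int main(void)
{
	reserve_system_vectors();
	printf("first free vector: 0x%x\n", assign_vector(0x31));
	return 0;
}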
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 1c2c7bf6a9d3..b3c2d268d708 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1770,7 +1770,7 @@ __setup("no_timer_check", notimercheck);
/*
*
- * IRQ's that are handled by the PIC in the MPS IOAPIC case.
+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
* - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
* Linux doesn't really care, as it's not actually used
* for any interrupt handling anyway.
@@ -1921,7 +1921,7 @@ void destroy_irq(unsigned int irq)
}
/*
- * MSI mesage composition
+ * MSI message composition
*/
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 66e6b797b2cb..07bbfe7aa7f7 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -320,7 +320,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
#ifdef CONFIG_X86_MCE_INTEL
/***
* mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
- * @cpu: The CPU on which the event occured.
+ * @cpu: The CPU on which the event occurred.
* @status: Event status information
*
* This function should be called by the thermal interrupt after the
@@ -688,7 +688,7 @@ static int __init mcheck_disable(char *str)
return 1;
}
-/* mce=off disables machine check. Note you can reenable it later
+/* mce=off disables machine check. Note you can re-enable it later
using sysfs.
mce=TOLERANCELEVEL (number, see above)
mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
@@ -799,7 +799,8 @@ static __cpuinit int mce_create_device(unsigned int cpu)
{
int err;
int i;
- if (!mce_available(&cpu_data[cpu]))
+
+ if (!mce_available(&cpu_data(cpu)))
return -EIO;
memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 0d2afd96aca4..752fb16a817d 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,11 +472,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
sprintf(name, "threshold_bank%i", bank);
#ifdef CONFIG_SMP
- if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
+ if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
i = first_cpu(per_cpu(cpu_core_map, cpu));
/* first core not up yet */
- if (cpu_data[i].cpu_core_id)
+ if (cpu_data(i).cpu_core_id)
goto out;
/* already linked */
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 09cf78110358..09c315214a5e 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -132,7 +132,7 @@ static struct ucode_cpu_info {
static void collect_cpu_info(int cpu_num)
{
- struct cpuinfo_x86 *c = cpu_data + cpu_num;
+ struct cpuinfo_x86 *c = &cpu_data(cpu_num);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
unsigned int val[2];
@@ -522,7 +522,7 @@ static struct platform_device *microcode_pdev;
static int cpu_request_microcode(int cpu)
{
char name[30];
- struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
const struct firmware *firmware;
void *buf;
unsigned long size;
@@ -570,7 +570,7 @@ static int cpu_request_microcode(int cpu)
static int apply_microcode_check_cpu(int cpu)
{
- struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpumask_t old;
unsigned int val[2];
diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c
index 13abb4ebfb79..7a05a7f6099a 100644
--- a/arch/x86/kernel/mpparse_32.c
+++ b/arch/x86/kernel/mpparse_32.c
@@ -1001,7 +1001,7 @@ void __init mp_config_acpi_legacy_irqs (void)
/*
* Use the default configuration for the IRQs 0-15. Unless
- * overriden by (MADT) interrupt source override entries.
+ * overridden by (MADT) interrupt source override entries.
*/
for (i = 0; i < 16; i++) {
int idx;
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index 8bf0ca03ac8e..ef4aab123581 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0;
/* Processor that is doing the boot up */
unsigned int boot_cpu_id = -1U;
+EXPORT_SYMBOL(boot_cpu_id);
+
/* Internal processor count */
unsigned int num_processors __cpuinitdata = 0;
@@ -86,7 +88,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
return sum & 0xFF;
}
-static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
{
int cpu;
cpumask_t tmp_map;
@@ -123,7 +125,18 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
cpu = 0;
}
bios_cpu_apicid[cpu] = m->mpc_apicid;
- x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+ /*
+ * We get called early in the start_kernel initialization
+ * process when the per_cpu data area is not yet setup, so we
+ * use a static array that is removed after the per_cpu data
+ * area is created.
+ */
+ if (x86_cpu_to_apicid_ptr) {
+ u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
+ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+ } else {
+ per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
+ }
cpu_set(cpu, cpu_possible_map);
cpu_set(cpu, cpu_present_map);
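MP_processor_info() above records APIC IDs through x86_cpu_to_apicid_ptr while the per-CPU area does not exist yet, and switches to per_cpu() once it does; the static __initdata array is discarded after boot. A simplified sketch of that early-boot indirection, with plain arrays standing in for per_cpu() and made-up APIC IDs:

/*
 * While the pointer is set, stores go to the static early array;
 * once it is cleared, the per-CPU copy is used instead.
 */
#include <stdio.h>

#define NR_CPUS		4
#define BAD_APICID	0xffu

static unsigned char apicid_init[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = BAD_APICID	/* GCC range designator, as above */
};
static unsigned char apicid_percpu[NR_CPUS];	/* stands in for per_cpu() */
static void *apicid_ptr = apicid_init;		/* cleared once per-CPU data exists */

static void record_apicid(int cpu, unsigned char id)
{
	if (apicid_ptr) {
		unsigned char *early = apicid_ptr;
		early[cpu] = id;		/* early boot path */
	} else {
		apicid_percpu[cpu] = id;	/* normal per-CPU path */
	}
}

int main(void)
{
	record_apicid(0, 0x10);			/* before per-CPU setup */
	apicid_ptr = NULL;			/* per-CPU area now live */
	record_apicid(1, 0x11);
	printf("%x %x\n", (unsigned)apicid_init[0], (unsigned)apicid_percpu[1]);
	return 0;
}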
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index e18e516cf549..ee6eba4ecfea 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -112,7 +112,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
static int msr_open(struct inode *inode, struct file *file)
{
unsigned int cpu = iminor(file->f_path.dentry->d_inode);
- struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
if (cpu >= NR_CPUS || !cpu_online(cpu))
return -ENXIO; /* No such CPU */
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index b2b42bdb0a15..afaf9f12c032 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -11,7 +11,7 @@
#include <asm/iommu.h>
#include <asm/calgary.h>
-int iommu_merge __read_mostly = 0;
+int iommu_merge __read_mostly = 1;
EXPORT_SYMBOL(iommu_merge);
dma_addr_t bad_dma_address __read_mostly;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 044a47745a5c..7b899584d290 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -295,34 +295,52 @@ static int __init idle_setup(char *str)
}
early_param("idle", idle_setup);
-void show_regs(struct pt_regs * regs)
+void __show_registers(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
unsigned long d0, d1, d2, d3, d6, d7;
+ unsigned long esp;
+ unsigned short ss, gs;
+
+ if (user_mode_vm(regs)) {
+ esp = regs->esp;
+ ss = regs->xss & 0xffff;
+ savesegment(gs, gs);
+ } else {
+ esp = (unsigned long) (&regs->esp);
+ savesegment(ss, ss);
+ savesegment(gs, gs);
+ }
printk("\n");
- printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
- printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+ printk("Pid: %d, comm: %s %s (%s %.*s)\n",
+ task_pid_nr(current), current->comm,
+ print_tainted(), init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+
+ printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+ 0xffff & regs->xcs, regs->eip, regs->eflags,
+ smp_processor_id());
print_symbol("EIP is at %s\n", regs->eip);
- if (user_mode_vm(regs))
- printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
- printk(" EFLAGS: %08lx %s (%s %.*s)\n",
- regs->eflags, print_tainted(), init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
- regs->eax,regs->ebx,regs->ecx,regs->edx);
- printk("ESI: %08lx EDI: %08lx EBP: %08lx",
- regs->esi, regs->edi, regs->ebp);
- printk(" DS: %04x ES: %04x FS: %04x\n",
- 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);
+ regs->eax, regs->ebx, regs->ecx, regs->edx);
+ printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+ regs->esi, regs->edi, regs->ebp, esp);
+ printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
+ regs->xds & 0xffff, regs->xes & 0xffff,
+ regs->xfs & 0xffff, gs, ss);
+
+ if (!all)
+ return;
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
cr4 = read_cr4_safe();
- printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
+ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
+ cr0, cr2, cr3, cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
@@ -330,10 +348,16 @@ void show_regs(struct pt_regs * regs)
get_debugreg(d3, 3);
printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
d0, d1, d2, d3);
+
get_debugreg(d6, 6);
get_debugreg(d7, 7);
- printk("DR6: %08lx DR7: %08lx\n", d6, d7);
+ printk("DR6: %08lx DR7: %08lx\n",
+ d6, d7);
+}
+void show_regs(struct pt_regs *regs)
+{
+ __show_registers(regs, 1);
show_trace(NULL, regs, &regs->esp);
}
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index 99102ec5fade..ff5431cc03ee 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -632,7 +632,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
/* User-mode eip? */
info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
- /* Send us the fakey SIGTRAP */
+ /* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index d769e204f942..a4ce1911efdf 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -45,9 +45,12 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
if (!(config & 0x2))
pci_write_config_byte(dev, 0xf4, config);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
+ quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
+ quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
+ quirk_intel_irqbalance);
#endif
#if defined(CONFIG_HPET_TIMER)
@@ -56,7 +59,8 @@ unsigned long force_hpet_address;
static enum {
NONE_FORCE_HPET_RESUME,
OLD_ICH_FORCE_HPET_RESUME,
- ICH_FORCE_HPET_RESUME
+ ICH_FORCE_HPET_RESUME,
+ VT8237_FORCE_HPET_RESUME
} force_hpet_resume_type;
static void __iomem *rcba_base;
@@ -146,17 +150,17 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
- ich_force_enable_hpet);
+ ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
- ich_force_enable_hpet);
+ ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
- ich_force_enable_hpet);
+ ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
- ich_force_enable_hpet);
+ ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
- ich_force_enable_hpet);
+ ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
- ich_force_enable_hpet);
+ ich_force_enable_hpet);
static struct pci_dev *cached_dev;
@@ -232,10 +236,91 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev)
printk(KERN_DEBUG "Failed to force enable HPET\n");
}
+/*
+ * Undocumented chipset features. Make sure that the user enforced
+ * this.
+ */
+static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
+{
+ if (hpet_force_user)
+ old_ich_force_enable_hpet(dev);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+ old_ich_force_enable_hpet_user);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
+ old_ich_force_enable_hpet_user);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+ old_ich_force_enable_hpet_user);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
+ old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
- old_ich_force_enable_hpet);
+ old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
- old_ich_force_enable_hpet);
+ old_ich_force_enable_hpet);
+
+
+static void vt8237_force_hpet_resume(void)
+{
+ u32 val;
+
+ if (!force_hpet_address || !cached_dev)
+ return;
+
+ val = 0xfed00000 | 0x80;
+ pci_write_config_dword(cached_dev, 0x68, val);
+
+ pci_read_config_dword(cached_dev, 0x68, &val);
+ if (val & 0x80)
+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
+ else
+ BUG();
+}
+
+static void vt8237_force_enable_hpet(struct pci_dev *dev)
+{
+ u32 uninitialized_var(val);
+
+ if (!hpet_force_user || hpet_address || force_hpet_address)
+ return;
+
+ pci_read_config_dword(dev, 0x68, &val);
+ /*
+ * Bit 7 is HPET enable bit.
+	 * Bits 31:10 are the HPET base address (contrary to what the datasheet claims)
+ */
+ if (val & 0x80) {
+ force_hpet_address = (val & ~0x3ff);
+ printk(KERN_DEBUG "HPET at base address 0x%lx\n",
+ force_hpet_address);
+ return;
+ }
+
+ /*
+	 * HPET is disabled. Try enabling it at FED00000 and check
+ * whether it sticks
+ */
+ val = 0xfed00000 | 0x80;
+ pci_write_config_dword(dev, 0x68, val);
+
+ pci_read_config_dword(dev, 0x68, &val);
+ if (val & 0x80) {
+ force_hpet_address = (val & ~0x3ff);
+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
+ force_hpet_address);
+ cached_dev = dev;
+ force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
+ return;
+ }
+
+ printk(KERN_DEBUG "Failed to force enable HPET\n");
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
+ vt8237_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
+ vt8237_force_enable_hpet);
+
void force_hpet_resume(void)
{
@@ -246,6 +331,9 @@ void force_hpet_resume(void)
case OLD_ICH_FORCE_HPET_RESUME:
return old_ich_force_hpet_resume();
+ case VT8237_FORCE_HPET_RESUME:
+ return vt8237_force_hpet_resume();
+
default:
break;
}
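The VT8237 quirk above follows a probe-and-verify pattern: read the config register, use the HPET base if the enable bit is already set, otherwise write the desired value and read it back to confirm it stuck before recording the forced address. A stand-alone sketch of that pattern, with a plain variable standing in for the PCI config dword at offset 0x68:

/*
 * Probe-and-verify: never trust a write to undocumented hardware until
 * it can be read back.
 */
#include <stdint.h>
#include <stdio.h>

#define HPET_ENABLE_BIT	0x80u
#define HPET_DEFAULT	0xfed00000u

static uint32_t fake_config_reg;		/* pretend PCI config dword */

static uint32_t read_cfg(void)    { return fake_config_reg; }
static void write_cfg(uint32_t v) { fake_config_reg = v; }

static uint32_t force_enable_hpet(void)
{
	uint32_t val = read_cfg();

	if (val & HPET_ENABLE_BIT)
		return val & ~0x3ffu;		/* already enabled: base address */

	write_cfg(HPET_DEFAULT | HPET_ENABLE_BIT);
	val = read_cfg();			/* did the write stick? */
	if (val & HPET_ENABLE_BIT)
		return val & ~0x3ffu;

	return 0;				/* could not force-enable */
}

int main(void)
{
	printf("HPET base: 0x%x\n", force_enable_hpet());
	return 0;
}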
diff --git a/arch/x86/kernel/reboot_64.c b/arch/x86/kernel/reboot_64.c
index 368db2b9c5ac..776eb06b6512 100644
--- a/arch/x86/kernel/reboot_64.c
+++ b/arch/x86/kernel/reboot_64.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/delay.h>
+#include <asm/desc.h>
#include <asm/hw_irq.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -136,7 +137,7 @@ void machine_emergency_restart(void)
}
case BOOT_TRIPLE:
- __asm__ __volatile__("lidt (%0)": :"r" (&no_idt));
+ load_idt((const struct desc_ptr *)&no_idt);
__asm__ __volatile__("int3");
reboot_type = BOOT_KBD;
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 8b30b26ad069..1a07bbea7be3 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <asm/reboot_fixups.h>
#include <asm/msr.h>
+#include <asm/geode.h>
static void cs5530a_warm_reset(struct pci_dev *dev)
{
@@ -24,11 +25,8 @@ static void cs5530a_warm_reset(struct pci_dev *dev)
static void cs5536_warm_reset(struct pci_dev *dev)
{
- /*
- * 6.6.2.12 Soft Reset (DIVIL_SOFT_RESET)
- * writing 1 to the LSB of this MSR causes a hard reset.
- */
- wrmsrl(0x51400017, 1ULL);
+ /* writing 1 to the LSB of this MSR causes a hard reset */
+ wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
udelay(50); /* shouldn't get here but be safe and spin a while */
}
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index ba9188235057..3558ac78c926 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -185,6 +185,12 @@ void __cpuinit check_efer(void)
unsigned long kernel_eflags;
/*
+ * Copies of the original ist values from the tss are only accessed during
+ * debugging, no special alignment required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
+
+/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
* and IDT. We reload them nevertheless, this function acts as a
@@ -224,8 +230,8 @@ void __cpuinit cpu_init (void)
memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
cpu_gdt_descr[cpu].size = GDT_SIZE;
- asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
- asm volatile("lidt %0" :: "m" (idt_descr));
+ load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
+ load_idt((const struct desc_ptr *)&idt_descr);
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init();
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 978dc0196a0f..ba2e165a8a0f 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -624,7 +624,7 @@ void __init setup_arch(char **cmdline_p)
/*
* NOTE: before this point _nobody_ is allowed to allocate
* any memory using the bootmem allocator. Although the
- * alloctor is now initialised only the first 8Mb of the kernel
+ * allocator is now initialised only the first 8Mb of the kernel
* virtual address space has been mapped. All allocations before
* paging_init() has completed must use the alloc_bootmem_low_pages()
* variant (which allocates DMA'able memory) and care must be taken
@@ -661,9 +661,7 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef CONFIG_PCI
-#ifdef CONFIG_X86_IO_APIC
- check_acpi_pci(); /* Checks more than just ACPI actually */
-#endif
+ early_quirks();
#endif
#ifdef CONFIG_ACPI
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index cdcba6975226..31322d42eaae 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -302,6 +302,11 @@ void __init setup_arch(char **cmdline_p)
dmi_scan_machine();
+#ifdef CONFIG_SMP
+ /* setup to use the static apicid table during kernel startup */
+ x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
+#endif
+
#ifdef CONFIG_ACPI
/*
* Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
@@ -554,7 +559,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
but in the same order as the HT nodeids.
If that doesn't result in a usable node fall back to the
path for the previous case. */
- int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
+ int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
if (ht_nodeid >= 0 &&
apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = apicid_to_node[ht_nodeid];
@@ -878,6 +883,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
+ c->cpu_index = 0;
#endif
}
@@ -984,6 +990,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct cpuinfo_x86 *c = v;
+ int cpu = 0;
/*
* These flag bits must match the definitions in <asm/cpufeature.h>.
@@ -1062,8 +1069,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
- if (!cpu_online(c-cpu_data))
+ if (!cpu_online(c->cpu_index))
return 0;
+ cpu = c->cpu_index;
#endif
seq_printf(m,"processor\t: %u\n"
@@ -1071,7 +1079,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"cpu family\t: %d\n"
"model\t\t: %d\n"
"model name\t: %s\n",
- (unsigned)(c-cpu_data),
+ (unsigned)cpu,
c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
c->x86,
(int)c->x86_model,
@@ -1083,7 +1091,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "stepping\t: unknown\n");
if (cpu_has(c,X86_FEATURE_TSC)) {
- unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
+ unsigned int freq = cpufreq_quick_get((unsigned)cpu);
if (!freq)
freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
@@ -1096,7 +1104,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
if (smp_num_siblings * c->x86_max_cores > 1) {
- int cpu = c - cpu_data;
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n",
cpus_weight(per_cpu(cpu_core_map, cpu)));
@@ -1154,12 +1161,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+ if (*pos == 0) /* just in case cpu 0 is not the first */
+ *pos = first_cpu(cpu_possible_map);
+ if ((*pos) < NR_CPUS && cpu_possible(*pos))
+ return &cpu_data(*pos);
+ return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
- ++*pos;
+ *pos = next_cpu(*pos, cpu_possible_map);
return c_start(m, pos);
}
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 6dc394b87255..9bdd83022f5f 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -594,7 +594,7 @@ static void fastcall do_signal(struct pt_regs *regs)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Reenable any watchpoints before delivering the
+ /* Re-enable any watchpoints before delivering the
* signal to user space. The processor register will
* have been cleared if the watchpoint triggered
* inside the kernel.
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 683802bec419..ab086b0357fc 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -410,7 +410,7 @@ static void do_signal(struct pt_regs *regs)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Reenable any watchpoints before delivering the
+ /* Re-enable any watchpoints before delivering the
* signal to user space. The processor register will
* have been cleared if the watchpoint triggered
* inside the kernel.
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 791d9f8036ae..f32115308399 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -69,7 +69,7 @@
*
* B stepping CPUs may hang. There are hardware work arounds
* for this. We warn about it in case your board doesn't have the work
- * arounds. Basically thats so I can tell anyone with a B stepping
+ * arounds. Basically that's so I can tell anyone with a B stepping
* CPU and SMP problems "tough".
*
* Specific items [From Pentium Processor Specification Update]
@@ -273,7 +273,7 @@ void leave_mm(unsigned long cpu)
* 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
* Stop ipi delivery for the old mm. This is not synchronized with
* the other cpus, but smp_invalidate_interrupt ignore flush ipis
- * for the wrong mm, and in the worst case we perform a superflous
+ * for the wrong mm, and in the worst case we perform a superfluous
* tlb flush.
* 1a2) set cpu_tlbstate to TLBSTATE_OK
* Now the smp_invalidate_interrupt won't call leave_mm if cpu0
@@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy)
*/
cpu_clear(smp_processor_id(), cpu_online_map);
disable_local_APIC();
- if (cpu_data[smp_processor_id()].hlt_works_ok)
+ if (cpu_data(smp_processor_id()).hlt_works_ok)
for(;;) halt();
for (;;);
}
@@ -676,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id)
int i;
for (i = 0; i < NR_CPUS; i++) {
- if (x86_cpu_to_apicid[i] == apic_id)
+ if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
return i;
}
return -1;
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 5c2964727d19..03fa6ed559c6 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void)
}
/*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- *
- * cpu is a standard Linux logical CPU number.
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system that are set in the mask.
*/
-static void
-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
+
+static int
+__smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
- int cpus = 1;
+ cpumask_t allbutself;
+ int cpus;
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+
+ cpus_and(mask, mask, allbutself);
+ cpus = cpus_weight(mask);
+
+ if (!cpus)
+ return 0;
data.func = func;
data.info = info;
@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
call_data = &data;
wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
+
+ /* Send a message to other CPUs */
+ if (cpus_equal(mask, allbutself))
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+ else
+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
/* Wait for response */
while (atomic_read(&data.started) != cpus)
cpu_relax();
if (!wait)
- return;
+ return 0;
while (atomic_read(&data.finished) != cpus)
cpu_relax();
+
+ return 0;
+}
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
+{
+ int ret;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ spin_lock(&call_lock);
+ ret = __smp_call_function_mask(mask, func, info, wait);
+ spin_unlock(&call_lock);
+ return ret;
}
+EXPORT_SYMBOL(smp_call_function_mask);
/*
* smp_call_function_single - Run a function on a specific CPU
@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait)
{
/* prevent preemption and reschedule on another processor */
+ int ret;
int me = get_cpu();
/* Can deadlock when called with interrupts disabled */
@@ -387,51 +434,14 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
return 0;
}
- spin_lock(&call_lock);
- __smp_call_function_single(cpu, func, info, nonatomic, wait);
- spin_unlock(&call_lock);
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
put_cpu();
- return 0;
+ return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-static void __smp_call_function (void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- struct call_data_struct data;
- int cpus = num_online_cpus()-1;
-
- if (!cpus)
- return;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (!wait)
- return;
-
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
-}
-
-/*
* smp_call_function - run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info,
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
int wait)
{
- spin_lock(&call_lock);
- __smp_call_function(func,info,nonatomic,wait);
- spin_unlock(&call_lock);
- return 0;
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
@@ -479,7 +486,7 @@ void smp_send_stop(void)
/* Don't deadlock on the call lock in panic */
nolock = !spin_trylock(&call_lock);
local_irq_save(flags);
- __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
if (!nolock)
spin_unlock(&call_lock);
disable_local_APIC();
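
[Editor's note] As a usage illustration of the new mask interface: a hypothetical caller (not part of this patch) that runs a short, non-sleeping function on an arbitrary CPU set and waits for completion. ping_cpu() and ping_mask() are invented names for the sketch.

static void ping_cpu(void *info)
{
        /* runs on every CPU in the mask; must be fast and must not sleep */
        atomic_inc((atomic_t *)info);
}

static int ping_mask(cpumask_t mask)
{
        atomic_t acks = ATOMIC_INIT(0);

        /* wait == 1: only return after every targeted CPU has run ping_cpu() */
        return smp_call_function_mask(mask, ping_cpu, &acks, 1);
}
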
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index be3faac04719..ef0f34ede1ab 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -67,7 +67,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */
-int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -89,12 +89,20 @@ EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;
/* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
- { [0 ... NR_CPUS-1] = 0xff };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
+/*
+ * The following static array is used during kernel startup
+ * and the x86_cpu_to_apicid_ptr contains the address of the
+ * array during this time. It is zeroed once the per_cpu
+ * data area has been set up.
+ */
+u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
+ { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_cpu_to_apicid_ptr;
+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
u8 apicid_2_node[MAX_APICID];
@@ -150,9 +158,10 @@ void __init smp_alloc_memory(void)
void __cpuinit smp_store_cpu_info(int id)
{
- struct cpuinfo_x86 *c = cpu_data + id;
+ struct cpuinfo_x86 *c = &cpu_data(id);
*c = boot_cpu_data;
+ c->cpu_index = id;
if (id!=0)
identify_secondary_cpu(c);
/*
@@ -294,7 +303,7 @@ static int cpucount;
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
- struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
/*
* For perf, we return last level cache shared map.
* And for power savings, we return cpu_core_map
@@ -311,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map;
void __cpuinit set_cpu_sibling_map(int cpu)
{
int i;
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
- c[cpu].cpu_core_id == c[i].cpu_core_id) {
+ if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+ c->cpu_core_id == cpu_data(i).cpu_core_id) {
cpu_set(i, per_cpu(cpu_sibling_map, cpu));
cpu_set(cpu, per_cpu(cpu_sibling_map, i));
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
- cpu_set(i, c[cpu].llc_shared_map);
- cpu_set(cpu, c[i].llc_shared_map);
+ cpu_set(i, c->llc_shared_map);
+ cpu_set(cpu, cpu_data(i).llc_shared_map);
}
}
} else {
cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
}
- cpu_set(cpu, c[cpu].llc_shared_map);
+ cpu_set(cpu, c->llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) {
per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
- c[cpu].booted_cores = 1;
+ c->booted_cores = 1;
return;
}
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (cpu_llc_id[cpu] != BAD_APICID &&
- cpu_llc_id[cpu] == cpu_llc_id[i]) {
- cpu_set(i, c[cpu].llc_shared_map);
- cpu_set(cpu, c[i].llc_shared_map);
+ if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+ per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+ cpu_set(i, c->llc_shared_map);
+ cpu_set(cpu, cpu_data(i).llc_shared_map);
}
- if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+ if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
/*
@@ -357,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
* the booted_cores for this new cpu
*/
if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
- c[cpu].booted_cores++;
+ c->booted_cores++;
/*
* increment the core count for all
* the other cpus in this package
*/
if (i != cpu)
- c[i].booted_cores++;
- } else if (i != cpu && !c[cpu].booted_cores)
- c[cpu].booted_cores = c[i].booted_cores;
+ cpu_data(i).booted_cores++;
+ } else if (i != cpu && !c->booted_cores)
+ c->booted_cores = cpu_data(i).booted_cores;
}
}
}
@@ -412,7 +421,7 @@ static void __cpuinit start_secondary(void *unused)
/*
* We need to hold call_lock, so there is no inconsistency
* between the time smp_call_function() determines number of
- * IPI receipients, and the time when the determination is made
+ * IPI recipients, and the time when the determination is made
* for which cpus receive the IPI. Holding this
* lock helps us to not include this cpu in a currently in progress
* smp_call_function().
@@ -804,7 +813,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
irq_ctx_init(cpu);
- x86_cpu_to_apicid[cpu] = apicid;
+ per_cpu(x86_cpu_to_apicid, cpu) = apicid;
/*
* This grunge runs the startup process for
* the targeted processor.
@@ -844,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
/* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n");
printk("CPU%d: ", cpu);
- print_cpu_info(&cpu_data[cpu]);
+ print_cpu_info(&cpu_data(cpu));
Dprintk("CPU has booted.\n");
} else {
boot_error= 1;
@@ -866,7 +875,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
cpucount--;
} else {
- x86_cpu_to_apicid[cpu] = apicid;
+ per_cpu(x86_cpu_to_apicid, cpu) = apicid;
cpu_set(cpu, cpu_present_map);
}
@@ -915,7 +924,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
struct warm_boot_cpu_info info;
int apicid, ret;
- apicid = x86_cpu_to_apicid[cpu];
+ apicid = per_cpu(x86_cpu_to_apicid, cpu);
if (apicid == BAD_APICID) {
ret = -ENODEV;
goto exit;
@@ -961,11 +970,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
*/
smp_store_cpu_info(0); /* Final full version of the data */
printk("CPU%d: ", 0);
- print_cpu_info(&cpu_data[0]);
+ print_cpu_info(&cpu_data(0));
boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
boot_cpu_logical_apicid = logical_smp_processor_id();
- x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
+ per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
current_thread_info()->cpu = 0;
@@ -1008,6 +1017,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
+ map_cpu_to_logical_apicid();
cpu_set(0, per_cpu(cpu_sibling_map, 0));
cpu_set(0, per_cpu(cpu_core_map, 0));
return;
@@ -1029,6 +1039,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
}
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
+ map_cpu_to_logical_apicid();
cpu_set(0, per_cpu(cpu_sibling_map, 0));
cpu_set(0, per_cpu(cpu_core_map, 0));
return;
@@ -1082,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
Dprintk("Before bogomips.\n");
for (cpu = 0; cpu < NR_CPUS; cpu++)
if (cpu_isset(cpu, cpu_callout_map))
- bogosum += cpu_data[cpu].loops_per_jiffy;
+ bogosum += cpu_data(cpu).loops_per_jiffy;
printk(KERN_INFO
"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount+1,
@@ -1152,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void)
void remove_siblinginfo(int cpu)
{
int sibling;
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -1160,15 +1171,15 @@ void remove_siblinginfo(int cpu)
* last thread sibling in this cpu core going down
*/
if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
- c[sibling].booted_cores--;
+ cpu_data(sibling).booted_cores--;
}
for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
cpus_clear(per_cpu(cpu_sibling_map, cpu));
cpus_clear(per_cpu(cpu_core_map, cpu));
- c[cpu].phys_proc_id = 0;
- c[cpu].cpu_core_id = 0;
+ c->phys_proc_id = 0;
+ c->cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
}
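
[Editor's note] The cpu_data() accessor used throughout these hunks is not defined in this file. A sketch of the companion header definitions one would expect (assumed here for illustration; the macro names simply mirror the per-cpu variable introduced above):

DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);

#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#define current_cpu_data        __get_cpu_var(cpu_info)
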
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e351ac4ab5b1..b7e768dd87c9 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */
-u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;
@@ -84,8 +84,8 @@ cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
/* Set when the idlers are all forked */
int smp_threads_ready;
@@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void)
static void __cpuinit smp_store_cpu_info(int id)
{
- struct cpuinfo_x86 *c = cpu_data + id;
+ struct cpuinfo_x86 *c = &cpu_data(id);
*c = boot_cpu_data;
+ c->cpu_index = id;
identify_cpu(c);
print_cpu_info(c);
}
@@ -237,7 +238,7 @@ void __cpuinit smp_callin(void)
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
- struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
/*
* For perf, we return last level cache shared map.
* And for power savings, we return cpu_core_map
@@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map;
static inline void set_cpu_sibling_map(int cpu)
{
int i;
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
- c[cpu].cpu_core_id == c[i].cpu_core_id) {
+ if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+ c->cpu_core_id == cpu_data(i).cpu_core_id) {
cpu_set(i, per_cpu(cpu_sibling_map, cpu));
cpu_set(cpu, per_cpu(cpu_sibling_map, i));
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
- cpu_set(i, c[cpu].llc_shared_map);
- cpu_set(cpu, c[i].llc_shared_map);
+ cpu_set(i, c->llc_shared_map);
+ cpu_set(cpu, cpu_data(i).llc_shared_map);
}
}
} else {
cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
}
- cpu_set(cpu, c[cpu].llc_shared_map);
+ cpu_set(cpu, c->llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) {
per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
- c[cpu].booted_cores = 1;
+ c->booted_cores = 1;
return;
}
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (cpu_llc_id[cpu] != BAD_APICID &&
- cpu_llc_id[cpu] == cpu_llc_id[i]) {
- cpu_set(i, c[cpu].llc_shared_map);
- cpu_set(cpu, c[i].llc_shared_map);
+ if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+ per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+ cpu_set(i, c->llc_shared_map);
+ cpu_set(cpu, cpu_data(i).llc_shared_map);
}
- if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+ if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
/*
@@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu)
* the booted_cores for this new cpu
*/
if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
- c[cpu].booted_cores++;
+ c->booted_cores++;
/*
* increment the core count for all
* the other cpus in this package
*/
if (i != cpu)
- c[i].booted_cores++;
- } else if (i != cpu && !c[cpu].booted_cores)
- c[cpu].booted_cores = c[i].booted_cores;
+ cpu_data(i).booted_cores++;
+ } else if (i != cpu && !c->booted_cores)
+ c->booted_cores = cpu_data(i).booted_cores;
}
}
}
@@ -350,7 +351,7 @@ void __cpuinit start_secondary(void)
/*
* We need to hold call_lock, so there is no inconsistency
* between the time smp_call_function() determines number of
- * IPI receipients, and the time when the determination is made
+ * IPI recipients, and the time when the determination is made
* for which cpus receive the IPI in genapic_flat.c. Holding this
* lock helps us to not include this cpu in a currently in progress
* smp_call_function().
@@ -694,7 +695,7 @@ do_rest:
clear_node_cpumask(cpu); /* was set by numa_add_cpu */
cpu_clear(cpu, cpu_present_map);
cpu_clear(cpu, cpu_possible_map);
- x86_cpu_to_apicid[cpu] = BAD_APICID;
+ per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
return -EIO;
}
@@ -841,6 +842,26 @@ static int __init smp_sanity_check(unsigned max_cpus)
}
/*
+ * Copy the apicids found by MP_processor_info from the initial array to the per cpu
+ * data area. The x86_cpu_to_apicid_init array is then expendable and the
+ * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
+ * longer available.
+ */
+void __init smp_set_apicids(void)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, cpu_possible_map) {
+ if (per_cpu_offset(cpu))
+ per_cpu(x86_cpu_to_apicid, cpu) =
+ x86_cpu_to_apicid_init[cpu];
+ }
+
+ /* indicate the static array will be going away soon */
+ x86_cpu_to_apicid_ptr = NULL;
+}
+
+/*
* Prepare for SMP bootup. The MP table or ACPI has been read
* earlier. Just do some sanity checking here and enable APIC mode.
*/
@@ -849,6 +870,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
nmi_watchdog_default();
current_cpu_data = boot_cpu_data;
current_thread_info()->cpu = 0; /* needed? */
+ smp_set_apicids();
set_cpu_sibling_map(0);
if (smp_sanity_check(max_cpus) < 0) {
@@ -968,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
static void remove_siblinginfo(int cpu)
{
int sibling;
- struct cpuinfo_x86 *c = cpu_data;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -976,15 +998,15 @@ static void remove_siblinginfo(int cpu)
* last thread sibling in this cpu core going down
*/
if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
- c[sibling].booted_cores--;
+ cpu_data(sibling).booted_cores--;
}
for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
cpus_clear(per_cpu(cpu_sibling_map, cpu));
cpus_clear(per_cpu(cpu_core_map, cpu));
- c[cpu].phys_proc_id = 0;
- c[cpu].cpu_core_id = 0;
+ c->phys_proc_id = 0;
+ c->cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
}
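
[Editor's note] The smp_set_apicids() copy above implies that early boot code writes APIC ids through x86_cpu_to_apicid_ptr before the per-cpu area exists. A sketch of that producer side (record_apicid() is a hypothetical name; the real early-boot path is not shown in this hunk):

void record_apicid(int cpu, u8 apicid)
{
        u8 *early_map = x86_cpu_to_apicid_ptr;

        if (early_map)                          /* per-cpu area not ready yet */
                early_map[cpu] = apicid;
        else                                    /* per-cpu area is live */
                per_cpu(x86_cpu_to_apicid, cpu) = apicid;
}
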
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 91c7acc8d999..72f463401592 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -64,7 +64,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
switch (rio_devs[wpeg_num]->type){
case CompatWPEG:
- /* The Compatability Winnipeg controls the 2 legacy buses,
+ /* The Compatibility Winnipeg controls the 2 legacy buses,
* the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
* a PCI-PCI bridge card is used in either slot: total 5 buses.
*/
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index f8fafe527ff1..bc9f59c246fd 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -32,9 +32,9 @@ void __save_processor_state(struct saved_context *ctxt)
/*
* descriptor tables
*/
- asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
- asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
- asm volatile ("str %0" : "=m" (ctxt->tr));
+ store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
+ store_idt((struct desc_ptr *)&ctxt->idt_limit);
+ store_tr(ctxt->tr);
/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
/*
@@ -91,8 +91,9 @@ void __restore_processor_state(struct saved_context *ctxt)
* now restore the descriptor tables to their proper values
* ltr is done i fix_processor_context().
*/
- asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
- asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+ load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
+ load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+
/*
* segment registers
@@ -123,7 +124,7 @@ void fix_processor_context(void)
int cpu = smp_processor_id();
struct tss_struct *t = &per_cpu(init_tss, cpu);
- set_tss_desc(cpu,t); /* This just modifies memory; should not be neccessary. But... This is neccessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+ set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has the concept of a busy TSS or some similar stupidity. */
cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
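
[Editor's note] The store_gdt()/load_gdt() helpers that replace the open-coded asm are assumed to be thin inline wrappers around the same instructions, roughly of this shape (a sketch, not the exact header contents):

static inline void store_gdt(struct desc_ptr *dtr)
{
        asm volatile("sgdt %0" : "=m" (*dtr));
}

static inline void load_gdt(const struct desc_ptr *dtr)
{
        asm volatile("lgdt %0" : : "m" (*dtr));
}
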
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 1e9d57256eb1..cc9acace7e23 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -63,6 +63,9 @@
int panic_on_unrecovered_nmi;
+DECLARE_BITMAP(used_vectors, NR_VECTORS);
+EXPORT_SYMBOL_GPL(used_vectors);
+
asmlinkage int system_call(void);
/* Do we ignore FPU interrupts ? */
@@ -288,33 +291,9 @@ EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
int i;
- int in_kernel = 1;
- unsigned long esp;
- unsigned short ss, gs;
-
- esp = (unsigned long) (&regs->esp);
- savesegment(ss, ss);
- savesegment(gs, gs);
- if (user_mode_vm(regs)) {
- in_kernel = 0;
- esp = regs->esp;
- ss = regs->xss & 0xffff;
- }
+
print_modules();
- printk(KERN_EMERG "CPU: %d\n"
- KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
- KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
- smp_processor_id(), 0xffff & regs->xcs, regs->eip,
- print_tainted(), regs->eflags, init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
- printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
- regs->eax, regs->ebx, regs->ecx, regs->edx);
- printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
- regs->esi, regs->edi, regs->ebp, esp);
- printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
- regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
+ __show_registers(regs, 0);
printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
TASK_COMM_LEN, current->comm, task_pid_nr(current),
current_thread_info(), current, task_thread_info(current));
@@ -322,14 +301,14 @@ void show_registers(struct pt_regs *regs)
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
- if (in_kernel) {
+ if (!user_mode_vm(regs)) {
u8 *eip;
unsigned int code_prologue = code_bytes * 43 / 64;
unsigned int code_len = code_bytes;
unsigned char c;
printk("\n" KERN_EMERG "Stack: ");
- show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
+ show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);
printk(KERN_EMERG "Code: ");
@@ -374,11 +353,11 @@ int is_valid_bugaddr(unsigned long eip)
void die(const char * str, struct pt_regs * regs, long err)
{
static struct {
- spinlock_t lock;
+ raw_spinlock_t lock;
u32 lock_owner;
int lock_owner_depth;
} die = {
- .lock = __SPIN_LOCK_UNLOCKED(die.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED,
.lock_owner = -1,
.lock_owner_depth = 0
};
@@ -389,13 +368,14 @@ void die(const char * str, struct pt_regs * regs, long err)
if (die.lock_owner != raw_smp_processor_id()) {
console_verbose();
- spin_lock_irqsave(&die.lock, flags);
+ __raw_spin_lock(&die.lock);
+ raw_local_save_flags(flags);
die.lock_owner = smp_processor_id();
die.lock_owner_depth = 0;
bust_spinlocks(1);
}
else
- local_save_flags(flags);
+ raw_local_save_flags(flags);
if (++die.lock_owner_depth < 3) {
unsigned long esp;
@@ -439,7 +419,8 @@ void die(const char * str, struct pt_regs * regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
add_taint(TAINT_DIE);
- spin_unlock_irqrestore(&die.lock, flags);
+ __raw_spin_unlock(&die.lock);
+ raw_local_irq_restore(flags);
if (!regs)
return;
@@ -1142,6 +1123,8 @@ static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
void __init trap_init(void)
{
+ int i;
+
#ifdef CONFIG_EISA
void __iomem *p = ioremap(0x0FFFD9, 4);
if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
@@ -1201,6 +1184,11 @@ void __init trap_init(void)
set_system_gate(SYSCALL_VECTOR,&system_call);
+ /* Reserve all the builtin and the syscall vector. */
+ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
+ set_bit(i, used_vectors);
+ set_bit(SYSCALL_VECTOR, used_vectors);
+
/*
* Should be a barrier for any external CPU state.
*/
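
[Editor's note] The used_vectors bitmap reserved in trap_init() is meant to be consulted by later vector allocation. A sketch of such a consumer (find_free_vector() is a hypothetical helper, not part of this patch):

static int find_free_vector(void)
{
        int vec;

        for (vec = FIRST_EXTERNAL_VECTOR; vec < NR_VECTORS; vec++) {
                if (!test_and_set_bit(vec, used_vectors))
                        return vec;     /* claimed an unreserved vector */
        }
        return -ENOSPC;
}
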
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b4a9b3db1994..d0c2bc7ab2ec 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -201,7 +201,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
#define MSG(txt) ops->warning(data, txt)
/*
- * x86-64 can have upto three kernel stacks:
+ * x86-64 can have up to three kernel stacks:
* process stack
* interrupt stack
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
@@ -462,7 +462,7 @@ void out_of_line_bug(void)
EXPORT_SYMBOL(out_of_line_bug);
#endif
-static DEFINE_SPINLOCK(die_lock);
+static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
@@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void)
oops_enter();
/* racy, but better than risking deadlock. */
- local_irq_save(flags);
+ raw_local_irq_save(flags);
cpu = smp_processor_id();
- if (!spin_trylock(&die_lock)) {
+ if (!__raw_spin_trylock(&die_lock)) {
if (cpu == die_owner)
/* nested oops. should stop eventually */;
else
- spin_lock(&die_lock);
+ __raw_spin_lock(&die_lock);
}
die_nest_count++;
die_owner = cpu;
@@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags)
die_owner = -1;
bust_spinlocks(0);
die_nest_count--;
- if (die_nest_count)
- /* We still own the lock */
- local_irq_restore(flags);
- else
+ if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
- spin_unlock_irqrestore(&die_lock, flags);
+ __raw_spin_unlock(&die_lock);
+ raw_local_irq_restore(flags);
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
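
[Editor's note] For orientation, oops_begin()/oops_end() are used as a bracketing pair by the die()/fault paths, roughly like this (a sketch of an assumed caller, with the register/stack dump elided):

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();     /* takes the raw die_lock, disables irqs */

        /* ... dump registers and stack for this oops ... */

        oops_end(flags);                        /* drops the lock only when nesting reaches zero */
        do_exit(SIGSEGV);
}
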
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e87a3939ed40..d78444c788a3 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -59,7 +59,7 @@ int check_tsc_unstable(void)
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
-/* Accellerators for sched_clock()
+/* Accelerators for sched_clock()
* convert from cycles(64bits) => nanoseconds (64bits)
* basic equation:
* ns = cycles / (freq / ns_per_sec)
@@ -74,7 +74,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
* And since SC is a constant power of two, we can convert the div
* into a shift.
*
- * We can use khz divisor instead of mhz to keep a better percision, since
+ * We can use khz divisor instead of mhz to keep a better precision, since
* cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
* (mathieu.desnoyers@polymtl.ca)
*
@@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void)
if (cpu_has_tsc) {
cpu_khz = calculate_cpu_khz();
tsc_khz = cpu_khz;
- cpu_data[0].loops_per_jiffy =
- cpufreq_scale(cpu_data[0].loops_per_jiffy,
+ cpu_data(0).loops_per_jiffy =
+ cpufreq_scale(cpu_data(0).loops_per_jiffy,
cpu_khz_old, cpu_khz);
return 0;
} else
@@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
return 0;
}
ref_freq = freq->old;
- loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+ loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
cpu_khz_ref = cpu_khz;
}
@@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
(val == CPUFREQ_RESUMECHANGE)) {
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
- cpu_data[freq->cpu].loops_per_jiffy =
+ cpu_data(freq->cpu).loops_per_jiffy =
cpufreq_scale(loops_per_jiffy_ref,
ref_freq, freq->new);
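
[Editor's note] The cycles-to-nanoseconds comment above describes a fixed-point scheme; a minimal sketch of that arithmetic, using the khz divisor and the power-of-two scale factor the comment refers to (names assumed for illustration):

#define CYC2NS_SCALE_FACTOR 10                  /* 2^10: the divide becomes a shift */

static unsigned long cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
        /* 10^6 * 2^10 / cpu_khz still fits in 32 bits, as noted above */
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
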
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 9f22e542c374..9c70af45b42b 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
struct cpufreq_freqs *freq = data;
unsigned long *lpj, dummy;
- if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
+ if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
return 0;
lpj = &dummy;
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
- lpj = &cpu_data[freq->cpu].loops_per_jiffy;
+ lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#else
lpj = &boot_cpu_data.loops_per_jiffy;
#endif
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 585541ca1a7e..ad4005c6d4a1 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -48,12 +48,12 @@
({unsigned long v; \
extern char __vsyscall_0; \
asm("" : "=r" (v) : "0" (x)); \
- ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
+ ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
/*
* vsyscall_gtod_data contains data that is :
* - readonly from vsyscalls
- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
+ * - written by the timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
* Try to keep this structure as small as possible to avoid cache line ping pongs
*/
int __vgetcpu_mode __section_vgetcpu_mode;
@@ -291,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
#ifdef CONFIG_NUMA
node = cpu_to_node(cpu);
#endif
- if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+ if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);
/* Store cpu number in limit so that it can be loaded quickly
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index f6edb11364df..952e7a89c2ac 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -82,7 +82,7 @@ inline void __const_udelay(unsigned long xloops)
__asm__("mull %0"
:"=d" (xloops), "=&a" (d0)
:"1" (xloops), "0"
- (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+ (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
__delay(++xloops);
}
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index 2dbebd308347..0ebbfb9e7c7f 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -40,7 +40,8 @@ EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
- __delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1);
+ __delay(((xloops * HZ *
+ cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
}
EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index 3f08010f3517..0c28a071824c 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -108,7 +108,7 @@ void __init time_init_hook(void)
* mca_nmi_hook - hook into MCA specific NMI chain
*
* Description:
- * The MCA (Microchannel Arcitecture) has an NMI chain for NMI sources
+ * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
* along the MCA bus. Use this to hook into that chain if you will need
* it.
**/
@@ -131,7 +131,7 @@ static __init int no_ipi_broadcast(char *str)
return 1;
}
-__setup("no_ipi_broadcast", no_ipi_broadcast);
+__setup("no_ipi_broadcast=", no_ipi_broadcast);
static int __init print_ipi_mode(void)
{
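
[Editor's note] The trailing '=' in the __setup() string matters: with it, the handler is invoked for "no_ipi_broadcast=<value>" and receives the text after the '='. A sketch of that pattern (an abbreviated, assumed version of the handler above):

static int no_ipi_broadcast;

static __init int no_ipi_broadcast_setup(char *str)
{
        get_option(&str, &no_ipi_broadcast);    /* str points just past the '=' */
        return 1;
}
__setup("no_ipi_broadcast=", no_ipi_broadcast_setup);
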
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
index 8685208d8512..1af0cc7648f0 100644
--- a/arch/x86/mach-generic/default.c
+++ b/arch/x86/mach-generic/default.c
@@ -1,5 +1,5 @@
/*
- * Default generic APIC driver. This handles upto 8 CPUs.
+ * Default generic APIC driver. This handles up to 8 CPUs.
*/
#define APIC_DEFINITION 1
#include <linux/threads.h>
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index 4121d1551800..f410d3cb5659 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -56,7 +56,7 @@ void __init generic_bigsmp_probe(void)
/*
* This routine is used to switch to bigsmp mode when
* - There is no apic= option specified by the user
- * - generic_apic_probe() has choosen apic_default as the sub_arch
+ * - generic_apic_probe() has chosen apic_default as the sub_arch
* - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
*/
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index e4928aa6bdfb..361ac5107b33 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -36,8 +36,8 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR
/* per CPU data structure (for /proc/cpuinfo et al), visible externally
* indexed physically */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info) __cacheline_aligned;
+EXPORT_PER_CPU_SYMBOL(cpu_info);
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
@@ -389,7 +389,7 @@ find_smp_config(void)
/* The boot CPU must be extended */
voyager_extended_vic_processors = 1<<boot_cpu_id;
- /* initially, all of the first 8 cpu's can boot */
+ /* initially, all of the first 8 CPUs can boot */
voyager_allowed_boot_processors = 0xff;
/* set up everything for just this CPU, we can alter
* this as we start the other CPUs later */
@@ -430,7 +430,7 @@ find_smp_config(void)
void __init
smp_store_cpu_info(int id)
{
- struct cpuinfo_x86 *c=&cpu_data[id];
+ struct cpuinfo_x86 *c = &cpu_data(id);
*c = boot_cpu_data;
@@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu)
cpu, smp_processor_id()));
printk("CPU%d: ", cpu);
- print_cpu_info(&cpu_data[cpu]);
+ print_cpu_info(&cpu_data(cpu));
wmb();
cpu_set(cpu, cpu_callout_map);
cpu_set(cpu, cpu_present_map);
@@ -683,7 +683,7 @@ smp_boot_cpus(void)
*/
smp_store_cpu_info(boot_cpu_id);
printk("CPU%d: ", boot_cpu_id);
- print_cpu_info(&cpu_data[boot_cpu_id]);
+ print_cpu_info(&cpu_data(boot_cpu_id));
if(is_cpu_quad()) {
/* booting on a Quad CPU */
@@ -714,7 +714,7 @@ smp_boot_cpus(void)
unsigned long bogosum = 0;
for (i = 0; i < NR_CPUS; i++)
if (cpu_isset(i, cpu_online_map))
- bogosum += cpu_data[i].loops_per_jiffy;
+ bogosum += cpu_data(i).loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount+1,
bogosum/(500000/HZ),
@@ -1010,7 +1010,7 @@ static struct call_data_struct * call_data;
/* execute a thread on a new CPU. The function to be called must be
* previously set up. This is used to schedule a function for
- * execution on all CPU's - set up the function then broadcast a
+ * execution on all CPUs - set up the function then broadcast a
* function_interrupt CPI to come here on each CPU */
static void
smp_call_function_interrupt(void)
@@ -1095,7 +1095,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
* CPI here. We don't use this actually for counting so losing
* ticks doesn't matter
*
- * FIXME: For those CPU's which actually have a local APIC, we could
+ * FIXME: For those CPUs which actually have a local APIC, we could
* try to use it to trigger this interrupt instead of having to
* broadcast the timer tick. Unfortunately, all my pentium DYADs have
* no local APIC, so I can't do this
@@ -1287,7 +1287,7 @@ smp_local_timer_interrupt(void)
/*
* We take the 'long' return path, and there every subsystem
- * grabs the apropriate locks (kernel lock/ irq lock).
+ * grabs the appropriate locks (kernel lock/ irq lock).
*
* we might want to decouple profiling from the 'long path',
* and do the profiling totally in assembly.
@@ -1759,7 +1759,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
if(cpus_addr(mask)[0] == 0)
- /* can't have no cpu's to accept the interrupt -- extremely
+ /* can't have no CPUs to accept the interrupt -- extremely
* bad things will happen */
return;
@@ -1791,7 +1791,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
}
/* this is magic, we now have the correct affinity maps, so
* enable the interrupt. This will send an enable CPI to
- * those cpu's who need to enable it in their local masks,
+ * those CPUs who need to enable it in their local masks,
* causing them to correct for the new affinity . If the
* interrupt is currently globally disabled, it will simply be
* disabled again as it comes in (voyager lazy disable). If
diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
index f9d595338159..50f9366c411e 100644
--- a/arch/x86/mach-voyager/voyager_thread.c
+++ b/arch/x86/mach-voyager/voyager_thread.c
@@ -64,7 +64,7 @@ check_from_kernel(void)
{
if(voyager_status.switch_off) {
- /* FIXME: This should be configureable via proc */
+ /* FIXME: This should be configurable via proc */
execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
} else if(voyager_status.power_fail) {
VDEBUG(("Voyager daemon detected AC power failure\n"));
diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
index 4de95a17a7d4..f14da2a53ece 100644
--- a/arch/x86/mm/boot_ioremap_32.c
+++ b/arch/x86/mm/boot_ioremap_32.c
@@ -10,7 +10,7 @@
/*
* We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
- * keeps that from happenning. If anyone has a better way, I'm listening.
+ * keeps that from happening. If anyone has a better way, I'm listening.
*
* boot_pte_t is defined only if this all works correctly
*/
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 13893772cc48..fe608a45ffb6 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -273,7 +273,7 @@ unsigned long __init setup_memory(void)
* When mapping a NUMA machine we allocate the node_mem_map arrays
* from node local memory. They are then mapped directly into KVA
* between zone normal and vmalloc space. Calculate the size of
- * this space and use it to adjust the boundry between ZONE_NORMAL
+ * this space and use it to adjust the boundary between ZONE_NORMAL
* and ZONE_HIGHMEM.
*/
find_max_pfn();
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 4d3e538c57ab..503dfc05111b 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -354,7 +354,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
@@ -362,7 +362,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
@@ -564,7 +564,8 @@ no_context:
* it's allocated already.
*/
if ((page >> PAGE_SHIFT) < max_low_pfn
- && (page & _PAGE_PRESENT)) {
+ && (page & _PAGE_PRESENT)
+ && !(page & _PAGE_PSE)) {
page &= PAGE_MASK;
page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
& (PTRS_PER_PTE - 1)];
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 5149ac136a5d..644b4f7ece10 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -169,7 +169,7 @@ void dump_pagetable(unsigned long address)
pmd = pmd_offset(pud, address);
if (bad_address(pmd)) goto bad;
printk("PMD %lx ", pmd_val(*pmd));
- if (!pmd_present(*pmd)) goto ret;
+ if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
pte = pte_offset_kernel(pmd, address);
if (bad_address(pte)) goto bad;
@@ -285,7 +285,6 @@ static int vmalloc_fault(unsigned long address)
return 0;
}
-static int page_fault_trace;
int show_unhandled_signals = 1;
/*
@@ -354,10 +353,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (likely(regs->eflags & X86_EFLAGS_IF))
local_irq_enable();
- if (unlikely(page_fault_trace))
- printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
- regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
-
if (unlikely(error_code & PF_RSVD))
pgtable_bad(address, regs, error_code);
@@ -378,7 +373,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
again:
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
@@ -386,7 +381,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
@@ -488,7 +483,7 @@ bad_area_nosemaphore:
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
printk(
- "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
+ "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, tsk->pid, address, regs->rip,
regs->rsp, error_code);
@@ -621,10 +616,3 @@ void vmalloc_sync_all(void)
BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
(__START_KERNEL & PGDIR_MASK)));
}
-
-static int __init enable_pagefaulttrace(char *str)
-{
- page_fault_trace = 1;
- return 1;
-}
-__setup("pagefaulttrace", enable_pagefaulttrace);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 5eec5e56d07f..3d6926ba8995 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -612,7 +612,7 @@ void __init init_cpu_to_node(void)
{
int i;
for (i = 0; i < NR_CPUS; i++) {
- u8 apicid = x86_cpu_to_apicid[i];
+ u8 apicid = x86_cpu_to_apicid_init[i];
if (apicid == BAD_APICID)
continue;
if (apicid_to_node[apicid] == NUMA_NO_NODE)
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 56089ccc3949..ea85172fc0cc 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -218,7 +218,7 @@ static inline int save_add_info(void) {return 0;}
/*
* Update nodes_add and decide if to include add are in the zone.
* Both SPARSE and RESERVE need nodes_add infomation.
- * This code supports one contigious hot add area per node.
+ * This code supports one contiguous hot add area per node.
*/
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index c049ce414f01..0ed046a187f7 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -13,25 +13,45 @@
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
+#include <asm/stacktrace.h>
-struct frame_head {
- struct frame_head * ebp;
- unsigned long ret;
-} __attribute__((packed));
+static void backtrace_warning_symbol(void *data, char *msg,
+ unsigned long symbol)
+{
+ /* Ignore warnings */
+}
-static struct frame_head *
-dump_kernel_backtrace(struct frame_head * head)
+static void backtrace_warning(void *data, char *msg)
{
- oprofile_add_trace(head->ret);
+ /* Ignore warnings */
+}
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (head >= head->ebp)
- return NULL;
+static int backtrace_stack(void *data, char *name)
+{
+ /* Yes, we want all stacks */
+ return 0;
+}
+
+static void backtrace_address(void *data, unsigned long addr)
+{
+ unsigned int *depth = data;
- return head->ebp;
+ if ((*depth)--)
+ oprofile_add_trace(addr);
}
+static struct stacktrace_ops backtrace_ops = {
+ .warning = backtrace_warning,
+ .warning_symbol = backtrace_warning_symbol,
+ .stack = backtrace_stack,
+ .address = backtrace_address,
+};
+
+struct frame_head {
+ struct frame_head *ebp;
+ unsigned long ret;
+} __attribute__((packed));
+
static struct frame_head *
dump_user_backtrace(struct frame_head * head)
{
@@ -53,72 +73,16 @@ dump_user_backtrace(struct frame_head * head)
return bufhead[0].ebp;
}
-/*
- * | | /\ Higher addresses
- * | |
- * --------------- stack base (address of current_thread_info)
- * | thread info |
- * . .
- * | stack |
- * --------------- saved regs->ebp value if valid (frame_head address)
- * . .
- * --------------- saved regs->rsp value if x86_64
- * | |
- * --------------- struct pt_regs * stored on stack if 32-bit
- * | |
- * . .
- * | |
- * --------------- %esp
- * | |
- * | | \/ Lower addresses
- *
- * Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the
- * valid(ish) ebp values. Note: (1) for x86_64, NMI and several other
- * exceptions use special stacks, maintained by the interrupt stack table
- * (IST). These stacks are set up in trap_init() in
- * arch/x86_64/kernel/traps.c. Thus, for x86_64, regs now does not point
- * to the kernel stack; instead, it points to some location on the NMI
- * stack. On the other hand, regs->rsp is the stack pointer saved when the
- * NMI occurred. (2) For 32-bit, regs->esp is not valid because the
- * processor does not save %esp on the kernel stack when interrupts occur
- * in the kernel mode.
- */
-#ifdef CONFIG_FRAME_POINTER
-static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
-{
- unsigned long headaddr = (unsigned long)head;
-#ifdef CONFIG_X86_64
- unsigned long stack = (unsigned long)regs->rsp;
-#else
- unsigned long stack = (unsigned long)regs;
-#endif
- unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;
-
- return headaddr > stack && headaddr < stack_base;
-}
-#else
-/* without fp, it's just junk */
-static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
-{
- return 0;
-}
-#endif
-
-
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
- struct frame_head *head;
-
-#ifdef CONFIG_X86_64
- head = (struct frame_head *)regs->rbp;
-#else
- head = (struct frame_head *)regs->ebp;
-#endif
+ struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+ unsigned long stack = stack_pointer(regs);
if (!user_mode_vm(regs)) {
- while (depth-- && valid_kernel_stack(head, regs))
- head = dump_kernel_backtrace(head);
+ if (depth)
+ dump_trace(NULL, regs, (unsigned long *)stack,
+ &backtrace_ops, &depth);
return;
}
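
[Editor's note] dump_trace() driven by a struct stacktrace_ops, as used above, is a generic pattern; a hypothetical consumer that merely prints every return address could reuse the same callbacks like this (print_address and print_ops are invented for the sketch):

static void print_address(void *data, unsigned long addr)
{
        printk(KERN_DEBUG " [<%lx>] ", addr);
        print_symbol("%s\n", addr);
}

static struct stacktrace_ops print_ops = {
        .warning        = backtrace_warning,
        .warning_symbol = backtrace_warning_symbol,
        .stack          = backtrace_stack,
        .address        = print_address,
};

/* usage: dump_trace(NULL, regs, (unsigned long *)stack_pointer(regs), &print_ops, NULL); */
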
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index abb1aa95b979..45b605fa71d0 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -29,7 +29,7 @@ struct op_msrs {
struct pt_regs;
/* The model vtable abstracts the differences between
- * various x86 CPU model's perfctr support.
+ * various x86 CPU models' perfctr support.
*/
struct op_x86_model_spec {
unsigned int const num_counters;
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index c52150fdf82b..88d8f5c0ecb5 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -169,7 +169,7 @@ void eisa_set_level_irq(unsigned int irq)
}
/*
- * Common IRQ routing practice: nybbles in config space,
+ * Common IRQ routing practice: nibbles in config space,
* offset by some magic constant.
*/
static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
@@ -585,7 +585,7 @@ static __init int via_router_probe(struct irq_router *r,
/* FIXME: We should move some of the quirk fixup stuff here */
/*
- * work arounds for some buggy BIOSes
+ * workarounds for some buggy BIOSes
*/
if (device == PCI_DEVICE_ID_VIA_82C586_0) {
switch(router->device) {
diff --git a/arch/x86_64/.gitignore b/arch/x86_64/.gitignore
new file mode 100644
index 000000000000..36ef4c374d25
--- /dev/null
+++ b/arch/x86_64/.gitignore
@@ -0,0 +1 @@
+boot
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 25785b23df87..aab25f3ba3ce 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -723,7 +723,7 @@ config ARCH_HIBERNATION_HEADER
source "drivers/acpi/Kconfig"
-source "arch/x86/kernel/cpufreq/Kconfig"
+source "arch/x86/kernel/cpu/cpufreq/Kconfig_64"
source "drivers/cpuidle/Kconfig"
@@ -768,9 +768,9 @@ source "fs/Kconfig.binfmt"
config IA32_EMULATION
bool "IA32 Emulation"
help
- Include code to run 32-bit programs under a 64-bit kernel. You should likely
- turn this on, unless you're 100% sure that you don't have any 32-bit programs
- left.
+ Include code to run 32-bit programs under a 64-bit kernel. You should
+ likely turn this on, unless you're 100% sure that you don't have any
+ 32-bit programs left.
config IA32_AOUT
tristate "IA32 a.out support"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 03e1ede27b85..6d89ab762ffc 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -74,7 +74,7 @@ KBUILD_CFLAGS += $(cflags-y)
CFLAGS_KERNEL += $(cflags-kernel-y)
KBUILD_AFLAGS += -m64
-head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task_64.o
+head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o
libs-y += arch/x86/lib/
core-y += arch/x86/kernel/ \
@@ -97,9 +97,9 @@ BOOTIMAGE := arch/x86/boot/bzImage
KBUILD_IMAGE := $(BOOTIMAGE)
bzImage: vmlinux
- $(Q)mkdir -p $(objtree)/arch/x86_64/boot
- $(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
$(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
+ $(Q)mkdir -p $(objtree)/arch/x86_64/boot
+ $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
bzlilo: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo
diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c
index f09962fa98c0..b61fb36674e7 100644
--- a/arch/xtensa/platform-iss/network.c
+++ b/arch/xtensa/platform-iss/network.c
@@ -798,7 +798,7 @@ static int iss_net_setup(char *str)
#undef ERR
-__setup("eth", iss_net_setup);
+__setup("eth=", iss_net_setup);
/*
* Initialize all ISS Ethernet devices previously registered in iss_net_setup.