author    Linus Torvalds  2007-05-09 22:05:57 +0200
committer Linus Torvalds  2007-05-09 22:05:57 +0200
commit    932c37c375cca25175f9b6acee4c75d7a96d985f (patch)
tree      d7ba3620cd9a7a21c2de1bdfc7badd7637ed635e /arch/arm/mm
parent    Fix a bad error case handling in read_cache_page_async() (diff)
parent    Merge branches 'armv7', 'at91', 'misc' and 'omap' into devel (diff)
download  kernel-qcow2-linux-932c37c375cca25175f9b6acee4c75d7a96d985f.tar.gz
          kernel-qcow2-linux-932c37c375cca25175f9b6acee4c75d7a96d985f.tar.xz
          kernel-qcow2-linux-932c37c375cca25175f9b6acee4c75d7a96d985f.zip
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (28 commits)
  ARM: OMAP: Fix GCC-reported compile time bug
  ARM: OMAP: restore CONFIG_GENERIC_TIME
  ARM: OMAP: partial LED fixes
  ARM: OMAP: add SoSSI clock (call propagate_rate for childrens)
  ARM: OMAP: FB sync with N800 tree (support for dynamic SRAM allocations)
  ARM: OMAP: Sync framebuffer headers with N800 tree
  ARM: OMAP: Mostly cosmetic to sync up with linux-omap tree
  ARM: OMAP: Fix gpmc header
  ARM: OMAP: Add mailbox support for IVA
  [ARM] armv7: add Makefile and Kconfig entries
  [ARM] armv7: add support for asid-tagged VIVT I-cache
  [ARM] armv7: add dedicated ARMv7 barrier instructions
  [ARM] armv7: Add ARMv7 cacheid macros
  [ARM] armv7: add support for ARMv7 cores.
  [ARM] Fix ARM branch relocation range
  [ARM] 4363/1: AT91: Remove legacy PIO definitions
  [ARM] 4361/1: AT91: Build error
  ARM: OMAP: Sync core code with linux-omap
  ARM: OMAP: Sync headers with linux-omap
  ARM: OMAP: h4 must have blinky leds!!
  ...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        32
-rw-r--r--  arch/arm/mm/Makefile        3
-rw-r--r--  arch/arm/mm/abort-ev7.S    32
-rw-r--r--  arch/arm/mm/cache-v7.S    253
-rw-r--r--  arch/arm/mm/context.c      17
-rw-r--r--  arch/arm/mm/proc-macros.S  12
-rw-r--r--  arch/arm/mm/proc-v7.S     262
7 files changed, 606 insertions, 5 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e684e9b38216..b81391a4e374 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -366,6 +366,19 @@ config CPU_32v6K
enabled will not boot on processors which do not support these
instructions.
+# ARMv7
+config CPU_V7
+ bool "Support ARM V7 processor"
+ depends on ARCH_INTEGRATOR
+ select CPU_32v6K
+ select CPU_32v7
+ select CPU_ABRT_EV7
+ select CPU_CACHE_V7
+ select CPU_CACHE_VIPT
+ select CPU_CP15_MMU
+ select CPU_COPY_V6 if MMU
+ select CPU_TLB_V6 if MMU
+
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
@@ -391,6 +404,9 @@ config CPU_32v5
config CPU_32v6
bool
+config CPU_32v7
+ bool
+
# The abort model
config CPU_ABRT_NOMMU
bool
@@ -413,6 +429,9 @@ config CPU_ABRT_EV5TJ
config CPU_ABRT_EV6
bool
+config CPU_ABRT_EV7
+ bool
+
# The cache model
config CPU_CACHE_V3
bool
@@ -429,6 +448,9 @@ config CPU_CACHE_V4WB
config CPU_CACHE_V6
bool
+config CPU_CACHE_V7
+ bool
+
config CPU_CACHE_VIVT
bool
@@ -503,7 +525,7 @@ comment "Processor Features"
config ARM_THUMB
bool "Support Thumb user binaries"
- depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
+ depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6 || CPU_V7
default y
help
Say Y if you want to include kernel support for running user space
@@ -578,9 +600,15 @@ config CPU_CACHE_ROUND_ROBIN
Say Y here to use the predictable round-robin cache replacement
policy. Unless you specifically require this or are unsure, say N.
+config CPU_L2CACHE_DISABLE
+ bool "Disable level 2 cache"
+ depends on CPU_V7
+ help
+ Say Y here to disable the level 2 cache. If unsure, say N.
+
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
- depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3
+ depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3 || CPU_V7
help
Say Y here to disable branch prediction. If unsure, say N.
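
[Editor's note: for orientation, the select graph above should resolve to roughly
this fragment in a generated .config on an MMU build. This is a sketch derived
from the select lines in the hunk, not kbuild output; CONFIG_CPU_COPY_V6 and
CONFIG_CPU_TLB_V6 appear only because of the "if MMU" qualifiers.]

    CONFIG_CPU_V7=y
    CONFIG_CPU_32v6K=y
    CONFIG_CPU_32v7=y
    CONFIG_CPU_ABRT_EV7=y
    CONFIG_CPU_CACHE_V7=y
    CONFIG_CPU_CACHE_VIPT=y
    CONFIG_CPU_CP15_MMU=y
    CONFIG_CPU_COPY_V6=y
    CONFIG_CPU_TLB_V6=y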
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 2f8b95947774..b5bd335ff14a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -24,12 +24,14 @@ obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o
obj-$(CONFIG_CPU_ABRT_EV5T) += abort-ev5t.o
obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
+obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o
obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
+obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
@@ -66,5 +68,6 @@ obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o
obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
+obj-$(CONFIG_CPU_V7) += proc-v7.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
new file mode 100644
index 000000000000..eb90bce38e14
--- /dev/null
+++ b/arch/arm/mm/abort-ev7.S
@@ -0,0 +1,32 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+/*
+ * Function: v7_early_abort
+ *
+ * Params : r2 = address of aborted instruction
+ * : r3 = saved SPSR
+ *
+ * Returns : r0 = address of abort
+ * : r1 = FSR, bit 11 = write
+ * : r2-r8 = corrupted
+ * : r9 = preserved
+ * : sp = pointer to registers
+ *
+ * Purpose : obtain information about current aborted instruction.
+ */
+ .align 5
+ENTRY(v7_early_abort)
+ /*
+ * The effect of a data abort on the exclusive access monitor is
+ * UNPREDICTABLE. Do a CLREX to clear the state.
+ */
+ clrex
+
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+
+ /*
+ * V6 code adjusts the returned DFSR.
+ * New designs should not need to patch up faults.
+ */
+ mov pc, lr
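
[Editor's note: as a hedged illustration of what v7_early_abort hands back
(r0 = FAR, r1 = FSR), a consumer could decode the status as below. The macro
and function names are invented for this sketch, not the kernel's own.]

    /* Decoding the DFSR returned in r1. Bit 11 flags a write access; on
     * ARMv7 the fault status FS[4:0] is split across bits [3:0] and bit
     * [10]. Illustrative only, per the assumptions noted above. */
    #include <stdint.h>

    #define FSR_WRITE       (1u << 11)
    #define FSR_STATUS(fsr) (((fsr) & 0xfu) | (((fsr) >> 6) & 0x10u))

    static int abort_was_write(uint32_t fsr)
    {
            return (fsr & FSR_WRITE) != 0;
    }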
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
new file mode 100644
index 000000000000..35ffc4d95997
--- /dev/null
+++ b/arch/arm/mm/cache-v7.S
@@ -0,0 +1,253 @@
+/*
+ * linux/arch/arm/mm/cache-v7.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2005 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the "shell" of the ARMv7 processor support.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+#include "proc-macros.S"
+
+/*
+ * v7_flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ *
+ * Corrupted registers: r0-r5, r7, r9-r11
+ */
+ENTRY(v7_flush_dcache_all)
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ move loc down to bits 3:1 (loc * 2)
+ beq finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 0
+loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask off the bits for the current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr & csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ extract maximum way number (ways - 1)
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract maximum set index (sets - 1)
+loop2:
+ mov r9, r4 @ create working copy of max way size
+loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge loop3
+ subs r7, r7, #1 @ decrement the index
+ bge loop2
+skip:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt loop1
+finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb
+ mov pc, lr
+
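
[Editor's note: because the set/way operand encoding above is dense, here is a
hedged C rendering of the same walk, following the CLIDR/CCSIDR layout in the
ARMv7 ARM. read_clidr(), read_ccsidr(), write_csselr(), dccisw() and isb() are
invented placeholder accessors, not kernel APIs; only the encoding is the point.]

    #include <stdint.h>

    extern uint32_t read_clidr(void);
    extern uint32_t read_ccsidr(void);
    extern void write_csselr(uint32_t);
    extern void dccisw(uint32_t);       /* clean & invalidate by set/way */
    extern void isb(void);

    static void flush_dcache_all_sketch(void)
    {
            uint32_t clidr = read_clidr();
            uint32_t loc = (clidr >> 24) & 0x7;       /* level of coherency */

            for (uint32_t level = 0; level < loc; level++) {
                    uint32_t ctype = (clidr >> (level * 3)) & 0x7;
                    if (ctype < 2)
                            continue;                 /* no cache or I-cache only */

                    write_csselr(level << 1);         /* select D/unified cache */
                    isb();                            /* sync CSSELR with CCSIDR */
                    uint32_t ccsidr = read_ccsidr();

                    uint32_t line_shift = (ccsidr & 0x7) + 4;     /* log2(line bytes) */
                    uint32_t max_way = (ccsidr >> 3) & 0x3ff;     /* ways - 1 */
                    uint32_t max_set = (ccsidr >> 13) & 0x7fff;   /* sets - 1 */
                    uint32_t way_shift = max_way ? (uint32_t)__builtin_clz(max_way) : 0;

                    for (int32_t set = (int32_t)max_set; set >= 0; set--)
                            for (int32_t way = (int32_t)max_way; way >= 0; way--)
                                    dccisw(((uint32_t)way << way_shift) |
                                           ((uint32_t)set << line_shift) |
                                           (level << 1));
            }
            write_csselr(0);                          /* back to cache level 0 */
            isb();
    }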
+/*
+ * v7_flush_kern_cache_all()
+ *
+ * Flush the entire cache system.
+ * The data cache flush is now achieved using atomic clean / invalidates
+ * working outwards from L1 cache. This is done using Set/Way based cache
+ * maintenance instructions.
+ * The instruction cache can still be invalidated back to the point of
+ * unification in a single instruction.
+ *
+ */
+ENTRY(v7_flush_kern_cache_all)
+ stmfd sp!, {r4-r5, r7, r9-r11, lr}
+ bl v7_flush_dcache_all
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
+ ldmfd sp!, {r4-r5, r7, r9-r11, lr}
+ mov pc, lr
+
+/*
+ * v7_flush_user_cache_all()
+ *
+ * Flush all cache entries in a particular address space.
+ *
+ * - mm - mm_struct describing address space
+ */
+ENTRY(v7_flush_user_cache_all)
+ /*FALLTHROUGH*/
+
+/*
+ * v7_flush_user_cache_range(start, end, flags)
+ *
+ * Flush a range of cache entries in the specified address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - flags - vm_area_struct flags describing address space
+ *
+ * It is assumed that:
+ * - we have a VIPT cache.
+ */
+ENTRY(v7_flush_user_cache_range)
+ mov pc, lr
+
+/*
+ * v7_coherent_kern_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified
+ * region. This is typically used when code has been written to
+ * a memory region, and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ *
+ * It is assumed that:
+ * - the Icache does not read data from the write buffer
+ */
+ENTRY(v7_coherent_kern_range)
+ /* FALLTHROUGH */
+
+/*
+ * v7_coherent_user_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified
+ * region. This is typically used when code has been written to
+ * a memory region, and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ *
+ * It is assumed that:
+ * - the Icache does not read data from the write buffer
+ */
+ENTRY(v7_coherent_user_range)
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ bic r0, r0, r3
+1: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification
+ dsb
+ mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
+ dsb
+ isb
+ mov pc, lr
+
+/*
+ * v7_flush_kern_dcache_page(kaddr)
+ *
+ * Ensure that the data held in the page kaddr is written back
+ * to the page in question.
+ *
+ * - kaddr - kernel address (guaranteed to be page aligned)
+ */
+ENTRY(v7_flush_kern_dcache_page)
+ dcache_line_size r2, r3
+ add r1, r0, #PAGE_SZ
+1:
+ mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb
+ mov pc, lr
+
+/*
+ * v7_dma_inv_range(start,end)
+ *
+ * Invalidate the data cache within the specified region; we will
+ * be performing a DMA operation in this region and we want to
+ * purge old data in the cache.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(v7_dma_inv_range)
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ tst r0, r3
+ bic r0, r0, r3
+ mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
+
+ tst r1, r3
+ bic r1, r1, r3
+ mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
+1:
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb
+ mov pc, lr
+
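
[Editor's note: the interesting detail above is the edge handling — lines only
partially covered by the DMA range are cleaned before invalidation so that
neighbouring data is not destroyed. A hedged C sketch follows; dccimvac(),
dcimvac() and dsb() are placeholders for the cp15 operations, not kernel APIs.]

    #include <stdint.h>

    extern void dccimvac(uint32_t va);  /* clean & invalidate line by VA */
    extern void dcimvac(uint32_t va);   /* invalidate line by VA */
    extern void dsb(void);

    static void dma_inv_range_sketch(uint32_t start, uint32_t end, uint32_t linesz)
    {
            uint32_t mask = linesz - 1;

            if (start & mask)                  /* partial line at the head: */
                    dccimvac(start & ~mask);   /* clean first so data survives */
            if (end & mask)                    /* partial line at the tail */
                    dccimvac(end & ~mask);

            for (uint32_t va = start & ~mask; va < (end & ~mask); va += linesz)
                    dcimvac(va);               /* fully covered lines: invalidate */
            dsb();
    }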
+/*
+ * v7_dma_clean_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(v7_dma_clean_range)
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb
+ mov pc, lr
+
+/*
+ * v7_dma_flush_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(v7_dma_flush_range)
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb
+ mov pc, lr
+
+ __INITDATA
+
+ .type v7_cache_fns, #object
+ENTRY(v7_cache_fns)
+ .long v7_flush_kern_cache_all
+ .long v7_flush_user_cache_all
+ .long v7_flush_user_cache_range
+ .long v7_coherent_kern_range
+ .long v7_coherent_user_range
+ .long v7_flush_kern_dcache_page
+ .long v7_dma_inv_range
+ .long v7_dma_clean_range
+ .long v7_dma_flush_range
+ .size v7_cache_fns, . - v7_cache_fns
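
[Editor's note: the nine .long entries correspond one-to-one to the fields of
struct cpu_cache_fns in include/asm-arm/cacheflush.h, through which the kernel
dispatches when several cache types are compiled in. The layout below is
reproduced from memory of this kernel era; verify against the header.]

    struct cpu_cache_fns {
            void (*flush_kern_all)(void);
            void (*flush_user_all)(void);
            void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
            void (*coherent_kern_range)(unsigned long, unsigned long);
            void (*coherent_user_range)(unsigned long, unsigned long);
            void (*flush_kern_dcache_page)(void *);
            void (*dma_inv_range)(unsigned long, unsigned long);
            void (*dma_clean_range)(unsigned long, unsigned long);
            void (*dma_flush_range)(unsigned long, unsigned long);
    };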
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 9da43a0fdcdf..fc84fcc74380 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -14,7 +14,8 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-unsigned int cpu_last_asid = { 1 << ASID_BITS };
+static DEFINE_SPINLOCK(cpu_asid_lock);
+unsigned int cpu_last_asid = ASID_FIRST_VERSION;
/*
* We fork()ed a process, and we need a new context for the child
@@ -31,15 +32,16 @@ void __new_context(struct mm_struct *mm)
{
unsigned int asid;
+ spin_lock(&cpu_asid_lock);
asid = ++cpu_last_asid;
if (asid == 0)
- asid = cpu_last_asid = 1 << ASID_BITS;
+ asid = cpu_last_asid = ASID_FIRST_VERSION;
/*
* If we've used up all our ASIDs, we need
* to start a new version and flush the TLB.
*/
- if ((asid & ~ASID_MASK) == 0) {
+ if (unlikely((asid & ~ASID_MASK) == 0)) {
asid = ++cpu_last_asid;
/* set the reserved ASID before flushing the TLB */
asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n"
@@ -47,7 +49,16 @@ void __new_context(struct mm_struct *mm)
: "r" (0));
isb();
flush_tlb_all();
+ if (icache_is_vivt_asid_tagged()) {
+ asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
+ "mcr p15, 0, %0, c7, c5, 6 @ flush BTAC/BTB\n"
+ :
+ : "r" (0));
+ dsb();
+ }
}
+ spin_unlock(&cpu_asid_lock);
+ mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
mm->context.id = asid;
}
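
[Editor's note: a hedged sketch of the ASID constants used above, as defined in
include/asm-arm/mmu_context.h of this era (values from memory, verify locally).
The low ASID_BITS form the hardware ASID and the upper bits act as a generation
counter, so (asid & ~ASID_MASK) == 0 flags a rollover of the 8-bit space.]

    #define ASID_BITS           8
    #define ASID_MASK           ((~0) << ASID_BITS)
    #define ASID_FIRST_VERSION  (1 << ASID_BITS)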
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 9e2c89eb2115..b13150052a76 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -59,3 +59,15 @@
.word \ucset
#endif
.endm
+
+/*
+ * dcache_line_size - get the cache line size from the CSIDR register
+ * (available on ARMv7+). It assumes that the CSSR register was configured
+ * to access the L1 data cache CSIDR.
+ */
+ .macro dcache_line_size, reg, tmp
+ mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR
+ and \tmp, \tmp, #7 @ cache line size encoding
+ mov \reg, #16 @ size offset
+ mov \reg, \reg, lsl \tmp @ actual cache line size
+ .endm
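
[Editor's note: in C, the macro's arithmetic reduces to a single shift. A sketch,
assuming the CSSR register already selects the L1 data cache as stated above.]

    #include <stdint.h>

    /* CSIDR bits [2:0] encode log2(words per line) - 2, so the line size
     * in bytes is 4 * 2^(enc + 2) = 16 << enc, which is what the macro does. */
    static inline uint32_t dcache_line_size_sketch(uint32_t csidr)
    {
            return 16u << (csidr & 0x7);
    }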
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
new file mode 100644
index 000000000000..dd823dd4a374
--- /dev/null
+++ b/arch/arm/mm/proc-v7.S
@@ -0,0 +1,262 @@
+/*
+ * linux/arch/arm/mm/proc-v7.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the "shell" of the ARMv7 processor support.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/elf.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+
+#include "proc-macros.S"
+
+#define TTB_C (1 << 0)
+#define TTB_S (1 << 1)
+#define TTB_RGN_OC_WT (2 << 3)
+#define TTB_RGN_OC_WB (3 << 3)
+
+ENTRY(cpu_v7_proc_init)
+ mov pc, lr
+
+ENTRY(cpu_v7_proc_fin)
+ mov pc, lr
+
+/*
+ * cpu_v7_reset(loc)
+ *
+ * Perform a soft reset of the system. Put the CPU into the
+ * same state as it would be if it had been reset, and branch
+ * to what would be the reset vector.
+ *
+ * - loc - location to jump to for soft reset
+ */
+ .align 5
+ENTRY(cpu_v7_reset)
+ mov pc, r0
+
+/*
+ * cpu_v7_do_idle()
+ *
+ * Idle the processor (eg, wait for interrupt).
+ *
+ * IRQs are already disabled.
+ */
+ENTRY(cpu_v7_do_idle)
+ .long 0xe320f003 @ ARM V7 WFI instruction
+ mov pc, lr
+
+ENTRY(cpu_v7_dcache_clean_area)
+#ifndef TLB_CAN_READ_FROM_L1_CACHE
+ dcache_line_size r2, r3
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, r2
+ subs r1, r1, r2
+ bhi 1b
+ dsb
+#endif
+ mov pc, lr
+
+/*
+ * cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ * Set the translation table base pointer to be pgd_phys
+ *
+ * - pgd_phys - physical address of new TTB
+ *
+ * It is assumed that:
+ * - we are not using split page tables
+ */
+ENTRY(cpu_v7_switch_mm)
+ mov r2, #0
+ ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ orr r0, r0, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB
+ mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
+ isb
+1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ isb
+ mcr p15, 0, r1, c13, c0, 1 @ set context ID
+ isb
+ mov pc, lr
+
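
[Editor's note: the write ordering above matters — the reserved ASID (0) is
installed before TTBR0 changes, so speculative table walks between the two
writes cannot create TLB entries pairing the old ASID with the new page tables.
A hedged C-level sketch of the same sequence; the accessor names are invented.]

    #include <stdint.h>

    #define TTB_RGN_OC_WB (3u << 3)             /* as defined in proc-v7.S above */

    extern void write_contextidr(uint32_t);
    extern void write_ttbr0(uint32_t);
    extern void isb(void);

    static void switch_mm_sketch(uint32_t pgd_phys, uint32_t context_id)
    {
            write_contextidr(0);                    /* reserved ASID first */
            isb();
            write_ttbr0(pgd_phys | TTB_RGN_OC_WB);  /* PTWs outer cacheable, WB */
            isb();
            write_contextidr(context_id);           /* new ASID last */
            isb();
    }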
+/*
+ * cpu_v7_set_pte_ext(ptep, pte)
+ *
+ * Set a level 2 translation table entry.
+ *
+ * - ptep - pointer to level 2 translation table entry
+ * (hardware version is stored at -2048 bytes)
+ * - pte - PTE value to store
+ * - ext - value for extended PTE bits
+ *
+ * Permissions:
+ * YUWD APX AP1 AP0 SVC User
+ * 0xxx 0 0 0 no acc no acc
+ * 100x 1 0 1 r/o no acc
+ * 10x0 1 0 1 r/o no acc
+ * 1011 0 0 1 r/w no acc
+ * 110x 0 1 0 r/w r/o
+ * 11x0 0 1 0 r/w r/o
+ * 1111 0 1 1 r/w r/w
+ */
+ENTRY(cpu_v7_set_pte_ext)
+ str r1, [r0], #-2048 @ linux version
+
+ bic r3, r1, #0x000003f0
+ bic r3, r3, #0x00000003
+ orr r3, r3, r2
+ orr r3, r3, #PTE_EXT_AP0 | 2
+
+ tst r1, #L_PTE_WRITE
+ tstne r1, #L_PTE_DIRTY
+ orreq r3, r3, #PTE_EXT_APX
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+ tstne r3, #PTE_EXT_APX
+ bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+
+ tst r1, #L_PTE_YOUNG
+ biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
+
+ tst r1, #L_PTE_EXEC
+ orreq r3, r3, #PTE_EXT_XN
+
+ tst r1, #L_PTE_PRESENT
+ moveq r3, #0
+
+ str r3, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ mov pc, lr
+
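
[Editor's note: a worked rendering of the permission table may help. This is
hedged C pseudocode mirroring the assembly flow, using the L_PTE_* / PTE_EXT_*
constants from asm/pgtable.h and asm/pgtable-hwdef.h; clean_dcache_line() is a
placeholder for the final "flush_pte" mcr, not a kernel API.]

    static void set_pte_ext_sketch(uint32_t *ptep, uint32_t pte, uint32_t ext)
    {
            ptep[0] = pte;                       /* Linux view of the PTE */

            uint32_t hw = (pte & ~0x3f3u) | ext | PTE_EXT_AP0 | 2; /* small page */

            if (!((pte & L_PTE_WRITE) && (pte & L_PTE_DIRTY)))
                    hw |= PTE_EXT_APX;           /* clean or read-only: write-protect */
            if (pte & L_PTE_USER) {
                    hw |= PTE_EXT_AP1;           /* grant user access */
                    if (hw & PTE_EXT_APX)        /* user read-only case */
                            hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
            }
            if (!(pte & L_PTE_YOUNG))
                    hw &= ~(PTE_EXT_APX | PTE_EXT_AP_MASK); /* not yet accessed */
            if (!(pte & L_PTE_EXEC))
                    hw |= PTE_EXT_XN;            /* execute-never */
            if (!(pte & L_PTE_PRESENT))
                    hw = 0;                      /* translation fault */

            ptep[-512] = hw;                     /* hardware PTE, 2048 bytes below */
            clean_dcache_line(&ptep[-512]);      /* placeholder for the mcr */
    }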
+cpu_v7_name:
+ .ascii "ARMv7 Processor"
+ .align
+
+ .section ".text.init", #alloc, #execinstr
+
+/*
+ * __v7_setup
+ *
+ * Initialise TLB, Caches, and MMU state ready to switch the MMU
+ * on. Return in r0 the new CP15 C1 control register setting.
+ *
+ * We automatically detect if we have a Harvard cache, and use the
+ * Harvard cache control instructions instead of the unified cache
+ * control instructions.
+ *
+ * This should be able to cover all ARMv7 cores.
+ *
+ * It is assumed that:
+ * - cache type register is implemented
+ */
+__v7_setup:
+ adr r12, __v7_setup_stack @ the local stack
+ stmia r12, {r0-r5, r7, r9, r11, lr}
+ bl v7_flush_dcache_all
+ ldmia r12, {r0-r5, r7, r9, r11, lr}
+ mov r10, #0
+#ifdef HARVARD_CACHE
+ mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
+#endif
+ dsb
+ mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
+ mcr p15, 0, r10, c2, c0, 2 @ TTB control register
+ orr r4, r4, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB
+ mcr p15, 0, r4, c2, c0, 0 @ load TTB0
+ mcr p15, 0, r4, c2, c0, 1 @ load TTB1
+ mov r10, #0x1f @ domains 0, 1 = manager
+ mcr p15, 0, r10, c3, c0, 0 @ load domain access register
+#ifndef CONFIG_CPU_L2CACHE_DISABLE
+ @ L2 cache configuration in the L2 aux control register
+ mrc p15, 1, r10, c9, c0, 2
+ bic r10, r10, #(1 << 16) @ L2 outer cache
+ mcr p15, 1, r10, c9, c0, 2
+ @ L2 cache is enabled in the aux control register
+ mrc p15, 0, r10, c1, c0, 1
+ orr r10, r10, #2
+ mcr p15, 0, r10, c1, c0, 1
+#endif
+ mrc p15, 0, r0, c1, c0, 0 @ read control register
+ ldr r10, cr1_clear @ get mask for bits to clear
+ bic r0, r0, r10 @ clear the masked bits
+ ldr r10, cr1_set @ get mask for bits to set
+ orr r0, r0, r10 @ set them
+ mov pc, lr @ return to head.S:__ret
+
+ /*
+ * V X F I D LR
+ * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
+ * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
+ * 0 110 0011 1.00 .111 1101 < we want
+ */
+ .type cr1_clear, #object
+ .type cr1_set, #object
+cr1_clear:
+ .word 0x0120c302
+cr1_set:
+ .word 0x00c0387d
+
+__v7_setup_stack:
+ .space 4 * 11 @ 11 registers
+
+ .type v7_processor_functions, #object
+ENTRY(v7_processor_functions)
+ .word v7_early_abort
+ .word cpu_v7_proc_init
+ .word cpu_v7_proc_fin
+ .word cpu_v7_reset
+ .word cpu_v7_do_idle
+ .word cpu_v7_dcache_clean_area
+ .word cpu_v7_switch_mm
+ .word cpu_v7_set_pte_ext
+ .size v7_processor_functions, . - v7_processor_functions
+
+ .type cpu_arch_name, #object
+cpu_arch_name:
+ .asciz "armv7"
+ .size cpu_arch_name, . - cpu_arch_name
+
+ .type cpu_elf_name, #object
+cpu_elf_name:
+ .asciz "v7"
+ .size cpu_elf_name, . - cpu_elf_name
+ .align
+
+ .section ".proc.info.init", #alloc, #execinstr
+
+ /*
+ * Match any ARMv7 processor core.
+ */
+ .type __v7_proc_info, #object
+__v7_proc_info:
+ .long 0x000f0000 @ Required ID value
+ .long 0x000f0000 @ Mask for ID
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_XN | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __v7_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_v7_name
+ .long v7_processor_functions
+ .long v6wbi_tlb_fns
+ .long v6_user_fns
+ .long v7_cache_fns
+ .size __v7_proc_info, . - __v7_proc_info
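
[Editor's note: for orientation, each .proc.info.init record lines up with
struct proc_info_list from include/asm-arm/procinfo.h — head.S matches cpu_val
against the masked CPUID and calls the __cpu_flush slot (the "b __v7_setup"
above). The layout is reproduced from memory of this era; verify locally.]

    struct proc_info_list {
            unsigned int            cpu_val;
            unsigned int            cpu_mask;
            unsigned long           __cpu_mm_mmu_flags;  /* PMD flags, kernel mappings */
            unsigned long           __cpu_io_mmu_flags;  /* PMD flags, IO mappings */
            unsigned long           __cpu_flush;         /* the "b __v7_setup" slot */
            const char              *arch_name;          /* "armv7" */
            const char              *elf_name;           /* "v7" */
            unsigned int            elf_hwcap;
            const char              *cpu_name;           /* "ARMv7 Processor" */
            struct processor        *proc;               /* v7_processor_functions */
            struct cpu_tlb_fns      *tlb;                /* v6wbi_tlb_fns */
            struct cpu_user_fns     *user;               /* v6_user_fns */
            struct cpu_cache_fns    *cache;              /* v7_cache_fns */
    };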