Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/c-octeon.c   |  2
 arch/mips/mm/fault.c      | 12
 arch/mips/mm/init.c       | 12
 arch/mips/mm/pgtable-64.c |  3
 arch/mips/mm/sc-mips.c    |  5
 arch/mips/mm/tlb-r4k.c    |  2
 arch/mips/mm/tlbex.c      | 53
 7 files changed, 24 insertions(+), 65 deletions(-)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 10ab69f7183f..94e05e5733c1 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -79,7 +79,7 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
* cores it has been used on
*/
if (vma)
- mask = vma->vm_mm->cpu_vm_mask;
+ mask = *mm_cpumask(vma->vm_mm);
else
mask = cpu_online_map;
cpu_clear(cpu, mask);
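The hunk above swaps a direct read of the old cpu_vm_mask field for the mm_cpumask() accessor, which hides how struct mm_struct stores its CPU mask. A fragment-level sketch of the same pattern, assuming the usual <linux/mm_types.h>/<linux/cpumask.h> definitions of this kernel generation; flush_used_cores() is a hypothetical caller, not part of the patch:

/* Copy the mask of CPUs this mm has run on, then drop the local core --
 * roughly what octeon_flush_icache_all_cores() does before sending IPIs. */
static void flush_used_cores(struct mm_struct *mm)
{
	cpumask_t mask;

	mask = *mm_cpumask(mm);			/* accessor, not mm->cpu_vm_mask */
	cpu_clear(smp_processor_id(), mask);	/* skip the local core */
	/* ... interrupt the cores still set in 'mask' ... */
}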
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f956ecbb8136..e97a7a2fb2c0 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -58,11 +58,17 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* only copy the information from the master page table,
* nothing more.
*/
+#ifdef CONFIG_64BIT
+# define VMALLOC_FAULT_TARGET no_context
+#else
+# define VMALLOC_FAULT_TARGET vmalloc_fault
+#endif
+
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
- goto vmalloc_fault;
+ goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
- goto vmalloc_fault;
+ goto VMALLOC_FAULT_TARGET;
#endif
/*
@@ -203,6 +209,7 @@ do_sigbus:
force_sig_info(SIGBUS, &info, tsk);
return;
+#ifndef CONFIG_64BIT
vmalloc_fault:
{
/*
@@ -241,4 +248,5 @@ vmalloc_fault:
goto no_context;
return;
}
+#endif
}
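The two hunks above split the routing of kernel-mode faults in the vmalloc (and module) ranges by word size: 32-bit kernels still take the lazy vmalloc_fault path that copies the missing top-level entry from init_mm, while 64-bit kernels go straight to no_context, since their TLB refill handler resolves those addresses against swapper_pg_dir itself and a fault that still reaches here is a genuine error. A hypothetical helper expressing the same decision in plain C, purely for illustration:

static inline int vmalloc_fault_is_fatal(void)
{
#ifdef CONFIG_64BIT
	return 1;	/* refill handler walks swapper_pg_dir directly */
#else
	return 0;	/* fall back to the lazy vmalloc_fault sync */
#endif
}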
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 0e820508ff23..15aa1902a788 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -352,7 +352,6 @@ void __init paging_init(void)
free_area_init_nodes(max_zone_pfns);
}
-static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
@@ -409,15 +408,13 @@ void __init mem_init(void)
if ((unsigned long) &_text > (unsigned long) CKSEG0)
/* The -4 is a hack so that user tools don't have to handle
the overflow. */
- kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
+ kclist_add(&kcore_kseg0, (void *) CKSEG0,
+ 0x80000000 - 4, KCORE_TEXT);
#endif
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- VMALLOC_END-VMALLOC_START);
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
@@ -475,9 +472,6 @@ unsigned long pgd_current[NR_CPUS];
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
-#ifdef MODULE_START
-pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
-#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
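Two interface changes drive the init.c hunks: kclist_add() now takes a KCORE_* type argument while the generic /proc/kcore code registers the plain RAM and vmalloc regions itself, leaving only the CKSEG0 text alias to the architecture; and the cast on nr_free_pages() is dropped, presumably because it now returns unsigned long. A sketch of the remaining registration under those assumptions; register_kseg0_kcore() is a hypothetical wrapper around the code in mem_init():

static struct kcore_list kcore_kseg0;

static void __init register_kseg0_kcore(void)
{
	/* Only needed when the kernel text sits above CKSEG0. */
	if ((unsigned long)&_text > (unsigned long)CKSEG0)
		kclist_add(&kcore_kseg0, (void *)CKSEG0,
			   0x80000000 - 4,	/* -4 avoids a 32-bit size overflow */
			   KCORE_TEXT);
}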
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index e4b565aeb008..1121019fa456 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -59,9 +59,6 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
-#ifdef MODULE_START
- pgd_init((unsigned long)module_pg_dir);
-#endif
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
pgd_base = swapper_pg_dir;
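With module_pg_dir gone, module space no longer gets its own top-level directory and every kernel mapping is rooted at swapper_pg_dir. Reconstructed from the context lines above, the function now reduces to roughly the following (the trailing fixmap setup is elided):

void __init pagetable_init(void)
{
	pgd_t *pgd_base;

	/* Initialize the entire pgd. */
	pgd_init((unsigned long)swapper_pg_dir);
	pmd_init((unsigned long)invalid_pmd_table,
		 (unsigned long)invalid_pte_table);

	pgd_base = swapper_pg_dir;
	/* ... fixrange_init() over the fixmap area follows, unchanged ... */
}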
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index b55c2d1b998f..5ab5fa8c1d82 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -32,6 +32,11 @@ static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
*/
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
+ unsigned long lsize = cpu_scache_line_size();
+ unsigned long almask = ~(lsize - 1);
+
+ cache_op(Hit_Writeback_Inv_SD, addr & almask);
+ cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
blast_inv_scache_range(addr, addr + size);
}
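The lines added to mips_sc_inv() write back and invalidate the two cache lines straddling the ends of the range before the range invalidate runs, so dirty data that neighbouring buffers keep in those partial lines is not silently thrown away. A worked example of the alignment arithmetic, with illustrative numbers not taken from the patch:

/*
 * Assume a 128-byte secondary cache line:
 *
 *	lsize  = 128                 almask = ~(lsize - 1) = 0xffffff80
 *	addr   = 0x1234              size   = 0x300
 *
 *	addr & almask              = 0x1200   -> first, partially covered line
 *	(addr + size - 1) & almask = 0x1500   -> last, partially covered line
 *
 * Hit_Writeback_Inv_SD on 0x1200 and 0x1500 flushes whatever else is dirty
 * in those two edge lines; blast_inv_scache_range() can then invalidate
 * [addr, addr + size) without losing anyone's data.
 */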
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index cee502caf398..d73428b18b0a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -475,7 +475,7 @@ static void __cpuinit probe_tlb(unsigned long config)
c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
-static int __cpuinitdata ntlb = 0;
+static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
get_option(&str, &ntlb);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9a17bf8395df..bb1719a55d22 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -321,6 +321,10 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_BCM3302:
case CPU_BCM4710:
case CPU_LOONGSON2:
+ case CPU_BCM6338:
+ case CPU_BCM6345:
+ case CPU_BCM6348:
+ case CPU_BCM6358:
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
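The new CPU_BCM63xx entries join the list of cores that need no tlbw hazard workaround, so they simply fall through to the plain TLB write at the end of this case group. Sketched from the surrounding code in build_tlb_write_entry(), where tlbw is a local function pointer set to uasm_i_tlbwi() or uasm_i_tlbwr() from the wmode argument:

		if (m4kc_tlbp_war())
			uasm_i_nop(p);
		tlbw(p);	/* indexed or random write, no hazard padding */
		break;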
@@ -499,11 +503,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* The vmalloc handling is not in the hotpath.
*/
uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-#ifdef MODULE_START
- uasm_il_bltz(p, r, tmp, label_module_alloc);
-#else
uasm_il_bltz(p, r, tmp, label_vmalloc);
-#endif
/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
#ifdef CONFIG_SMP
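With label_module_alloc removed, build_get_pmde64() emits a single test for kernel addresses: any sign-extended (negative) BadVAddr, module space included, branches to label_vmalloc and is later resolved against swapper_pg_dir. The emitted fragment now amounts to roughly this uasm pseudo-assembly (register choice illustrative):

	dmfc0	k1, CP0_BADVADDR	# uasm_i_dmfc0(p, tmp, C0_BADVADDR)
	bltz	k1, label_vmalloc	# uasm_il_bltz(p, r, tmp, label_vmalloc)
					# no nop: the next insn does not read k1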
@@ -556,52 +556,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
{
long swpd = (long)swapper_pg_dir;
-#ifdef MODULE_START
- long modd = (long)module_pg_dir;
-
- uasm_l_module_alloc(l, *p);
- /*
- * Assumption:
- * VMALLOC_START >= 0xc000000000000000UL
- * MODULE_START >= 0xe000000000000000UL
- */
- UASM_i_SLL(p, ptr, bvaddr, 2);
- uasm_il_bgez(p, r, ptr, label_vmalloc);
-
- if (uasm_in_compat_space_p(MODULE_START) &&
- !uasm_rel_lo(MODULE_START)) {
- uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
- } else {
- /* unlikely configuration */
- uasm_i_nop(p); /* delay slot */
- UASM_i_LA(p, ptr, MODULE_START);
- }
- uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
-
- if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
- uasm_il_b(p, r, label_vmalloc_done);
- uasm_i_lui(p, ptr, uasm_rel_hi(modd));
- } else {
- UASM_i_LA_mostly(p, ptr, modd);
- uasm_il_b(p, r, label_vmalloc_done);
- if (uasm_in_compat_space_p(modd))
- uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
- else
- uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
- }
-
uasm_l_vmalloc(l, *p);
- if (uasm_in_compat_space_p(MODULE_START) &&
- !uasm_rel_lo(MODULE_START) &&
- MODULE_START << 32 == VMALLOC_START)
- uasm_i_dsll32(p, ptr, ptr, 0); /* typical case */
- else
- UASM_i_LA(p, ptr, VMALLOC_START);
-#else
- uasm_l_vmalloc(l, *p);
- UASM_i_LA(p, ptr, VMALLOC_START);
-#endif
- uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
uasm_il_b(p, r, label_vmalloc_done);