Diffstat (limited to 'arch/microblaze/kernel')
33 files changed, 1640 insertions, 1428 deletions
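The largest change below reworks arch/microblaze/kernel/cpu/cache.c: the old monolithic _enable/_disable/_invalidate cache routines are replaced by per-configuration handler tables (struct scache), and microblaze_cache_init() selects one of them at boot (wb_msr, wb_nomsr, wt_msr, wt_nomsr, wt_msr_noirq or wt_nomsr_noirq) from the PVR/DTS data: MSR-instruction support, write-back vs. write-through dcache, and CPU version. Generic cache calls then dispatch through the selected table via the mbc pointer. The wrapper definitions live in asm/cacheflush.h, which is not part of this diff, so the following is only a minimal sketch of that assumed dispatch:

/*
 * Illustration only -- the real wrappers are defined in
 * arch/microblaze/include/asm/cacheflush.h, which is outside this diff.
 * 'mbc' is the handler table chosen by microblaze_cache_init() below.
 */
extern struct scache *mbc;

static inline void flush_dcache_range(unsigned long start, unsigned long end)
{
	mbc->dflr(start, end);		/* e.g. __flush_dcache_range_wb on WB parts */
}

static inline void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	mbc->dinr(start, end);		/* e.g. __invalidate_dcache_range_wb */
}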
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile index d487729683de..e51bc1520825 100644 --- a/arch/microblaze/kernel/Makefile +++ b/arch/microblaze/kernel/Makefile @@ -2,12 +2,22 @@ # Makefile # +ifdef CONFIG_FUNCTION_TRACER +# Do not trace early boot code and low level code +CFLAGS_REMOVE_timer.o = -pg +CFLAGS_REMOVE_intc.o = -pg +CFLAGS_REMOVE_early_printk.o = -pg +CFLAGS_REMOVE_selfmod.o = -pg +CFLAGS_REMOVE_heartbeat.o = -pg +CFLAGS_REMOVE_ftrace.o = -pg +endif + extra-y := head.o vmlinux.lds -obj-y += exceptions.o \ +obj-y += dma.o exceptions.o \ hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ of_platform.o process.o prom.o prom_parse.o ptrace.o \ - setup.o signal.o sys_microblaze.o timer.o traps.o + setup.o signal.o sys_microblaze.o timer.o traps.o reset.o obj-y += cpu/ @@ -16,5 +26,7 @@ obj-$(CONFIG_SELFMOD) += selfmod.o obj-$(CONFIG_HEART_BEAT) += heartbeat.o obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o obj-$(CONFIG_MMU) += misc.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o obj-y += entry$(MMU).o diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c index 7bc7b68f97db..0071260a672c 100644 --- a/arch/microblaze/kernel/asm-offsets.c +++ b/arch/microblaze/kernel/asm-offsets.c @@ -90,6 +90,7 @@ int main(int argc, char *argv[]) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); + DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count)); BLANK(); /* struct cpu_context */ diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile index 20646e549271..59cc7bceaf8c 100644 --- a/arch/microblaze/kernel/cpu/Makefile +++ b/arch/microblaze/kernel/cpu/Makefile @@ -2,6 +2,10 @@ # Build the appropriate CPU version support # +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_cache.o = -pg +endif + EXTRA_CFLAGS += -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \ -DCPU_REV=$(CPU_REV) diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index af866a450125..f04d8a86dead 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c @@ -3,7 +3,7 @@ * * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2007-2009 PetaLogix - * Copyright (C) 2007 John Williams <john.williams@petalogix.com> + * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com> * * This file is subject to the terms and conditions of the GNU General * Public License. 
See the file COPYING in the main directory of this @@ -13,243 +13,646 @@ #include <asm/cacheflush.h> #include <linux/cache.h> #include <asm/cpuinfo.h> +#include <asm/pvr.h> -/* Exported functions */ +static inline void __enable_icache_msr(void) +{ + __asm__ __volatile__ (" msrset r0, %0; \ + nop; " \ + : : "i" (MSR_ICE) : "memory"); +} + +static inline void __disable_icache_msr(void) +{ + __asm__ __volatile__ (" msrclr r0, %0; \ + nop; " \ + : : "i" (MSR_ICE) : "memory"); +} -void _enable_icache(void) +static inline void __enable_dcache_msr(void) { - if (cpuinfo.use_icache) { -#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR - __asm__ __volatile__ (" \ - msrset r0, %0; \ - nop; " \ - : \ - : "i" (MSR_ICE) \ + __asm__ __volatile__ (" msrset r0, %0; \ + nop; " \ + : \ + : "i" (MSR_DCE) \ : "memory"); -#else - __asm__ __volatile__ (" \ - mfs r12, rmsr; \ - nop; \ - ori r12, r12, %0; \ - mts rmsr, r12; \ - nop; " \ - : \ - : "i" (MSR_ICE) \ - : "memory", "r12"); -#endif - } } -void _disable_icache(void) +static inline void __disable_dcache_msr(void) { - if (cpuinfo.use_icache) { -#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR - __asm__ __volatile__ (" \ - msrclr r0, %0; \ - nop; " \ - : \ - : "i" (MSR_ICE) \ + __asm__ __volatile__ (" msrclr r0, %0; \ + nop; " \ + : \ + : "i" (MSR_DCE) \ : "memory"); -#else - __asm__ __volatile__ (" \ - mfs r12, rmsr; \ - nop; \ - andi r12, r12, ~%0; \ - mts rmsr, r12; \ - nop; " \ - : \ - : "i" (MSR_ICE) \ +} + +static inline void __enable_icache_nomsr(void) +{ + __asm__ __volatile__ (" mfs r12, rmsr; \ + nop; \ + ori r12, r12, %0; \ + mts rmsr, r12; \ + nop; " \ + : \ + : "i" (MSR_ICE) \ : "memory", "r12"); -#endif - } } -void _invalidate_icache(unsigned int addr) +static inline void __disable_icache_nomsr(void) { - if (cpuinfo.use_icache) { - __asm__ __volatile__ (" \ - wic %0, r0" \ - : \ - : "r" (addr)); - } + __asm__ __volatile__ (" mfs r12, rmsr; \ + nop; \ + andi r12, r12, ~%0; \ + mts rmsr, r12; \ + nop; " \ + : \ + : "i" (MSR_ICE) \ + : "memory", "r12"); } -void _enable_dcache(void) +static inline void __enable_dcache_nomsr(void) { - if (cpuinfo.use_dcache) { -#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR - __asm__ __volatile__ (" \ - msrset r0, %0; \ - nop; " \ - : \ - : "i" (MSR_DCE) \ - : "memory"); -#else - __asm__ __volatile__ (" \ - mfs r12, rmsr; \ - nop; \ - ori r12, r12, %0; \ - mts rmsr, r12; \ - nop; " \ - : \ - : "i" (MSR_DCE) \ + __asm__ __volatile__ (" mfs r12, rmsr; \ + nop; \ + ori r12, r12, %0; \ + mts rmsr, r12; \ + nop; " \ + : \ + : "i" (MSR_DCE) \ + : "memory", "r12"); +} + +static inline void __disable_dcache_nomsr(void) +{ + __asm__ __volatile__ (" mfs r12, rmsr; \ + nop; \ + andi r12, r12, ~%0; \ + mts rmsr, r12; \ + nop; " \ + : \ + : "i" (MSR_DCE) \ : "memory", "r12"); +} + + +/* Helper macro for computing the limits of cache range loops */ +#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ +do { \ + int align = ~(cache_line_length - 1); \ + end = min(start + cache_size, end); \ + start &= align; \ + end = ((end & align) + cache_line_length); \ +} while (0); + +/* + * Helper macro to loop over the specified cache_size/line_length and + * execute 'op' on that cacheline + */ +#define CACHE_ALL_LOOP(cache_size, line_length, op) \ +do { \ + unsigned int len = cache_size; \ + int step = -line_length; \ + BUG_ON(step >= 0); \ + \ + __asm__ __volatile__ (" 1: " #op " %0, r0; \ + bgtid %0, 1b; \ + addk %0, %0, %1; \ + " : : "r" (len), "r" (step) \ + : "memory"); \ +} while (0); + + +#define CACHE_ALL_LOOP2(cache_size, 
line_length, op) \ +do { \ + unsigned int len = cache_size; \ + int step = -line_length; \ + BUG_ON(step >= 0); \ + \ + __asm__ __volatile__ (" 1: " #op " r0, %0; \ + bgtid %0, 1b; \ + addk %0, %0, %1; \ + " : : "r" (len), "r" (step) \ + : "memory"); \ +} while (0); + +/* for wdc.flush/clear */ +#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ +do { \ + int step = -line_length; \ + int count = end - start; \ + BUG_ON(count <= 0); \ + \ + __asm__ __volatile__ (" 1: " #op " %0, %1; \ + bgtid %1, 1b; \ + addk %1, %1, %2; \ + " : : "r" (start), "r" (count), \ + "r" (step) : "memory"); \ +} while (0); + +/* It is used only first parameter for OP - for wic, wdc */ +#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ +do { \ + int volatile temp; \ + BUG_ON(end - start <= 0); \ + \ + __asm__ __volatile__ (" 1: " #op " %1, r0; \ + cmpu %0, %1, %2; \ + bgtid %0, 1b; \ + addk %1, %1, %3; \ + " : : "r" (temp), "r" (start), "r" (end),\ + "r" (line_length) : "memory"); \ +} while (0); + +#define ASM_LOOP + +static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) +{ + unsigned long flags; +#ifndef ASM_LOOP + int i; #endif - } + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + + CACHE_LOOP_LIMITS(start, end, + cpuinfo.icache_line_length, cpuinfo.icache_size); + + local_irq_save(flags); + __disable_icache_msr(); + +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif + __enable_icache_msr(); + local_irq_restore(flags); } -void _disable_dcache(void) +static void __flush_icache_range_nomsr_irq(unsigned long start, + unsigned long end) { -#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR - __asm__ __volatile__ (" \ - msrclr r0, %0; \ - nop; " \ - : \ - : "i" (MSR_DCE) \ - : "memory"); + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + + CACHE_LOOP_LIMITS(start, end, + cpuinfo.icache_line_length, cpuinfo.icache_size); + + local_irq_save(flags); + __disable_icache_nomsr(); + +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); #else - __asm__ __volatile__ (" \ - mfs r12, rmsr; \ - nop; \ - andi r12, r12, ~%0; \ - mts rmsr, r12; \ - nop; " \ - : \ - : "i" (MSR_DCE) \ - : "memory", "r12"); + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); #endif + + __enable_icache_nomsr(); + local_irq_restore(flags); } -void _invalidate_dcache(unsigned int addr) +static void __flush_icache_range_noirq(unsigned long start, + unsigned long end) { - __asm__ __volatile__ (" \ - wdc %0, r0" \ - : \ - : "r" (addr)); +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + + CACHE_LOOP_LIMITS(start, end, + cpuinfo.icache_line_length, cpuinfo.icache_size); +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif } -void __invalidate_icache_all(void) +static void __flush_icache_all_msr_irq(void) { - unsigned int i; - unsigned flags; + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); - if (cpuinfo.use_icache) { - local_irq_save(flags); - 
__disable_icache(); + local_irq_save(flags); + __disable_icache_msr(); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); +#else + for (i = 0; i < cpuinfo.icache_size; + i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif + __enable_icache_msr(); + local_irq_restore(flags); +} - /* Just loop through cache size and invalidate, no need to add - CACHE_BASE address */ - for (i = 0; i < cpuinfo.icache_size; - i += cpuinfo.icache_line) - __invalidate_icache(i); +static void __flush_icache_all_nomsr_irq(void) +{ + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); - __enable_icache(); - local_irq_restore(flags); - } + local_irq_save(flags); + __disable_icache_nomsr(); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); +#else + for (i = 0; i < cpuinfo.icache_size; + i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif + __enable_icache_nomsr(); + local_irq_restore(flags); } -void __invalidate_icache_range(unsigned long start, unsigned long end) +static void __flush_icache_all_noirq(void) { - unsigned int i; - unsigned flags; - unsigned int align; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); +#else + for (i = 0; i < cpuinfo.icache_size; + i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif +} - if (cpuinfo.use_icache) { - /* - * No need to cover entire cache range, - * just cover cache footprint - */ - end = min(start + cpuinfo.icache_size, end); - align = ~(cpuinfo.icache_line - 1); - start &= align; /* Make sure we are aligned */ - /* Push end up to the next cache line */ - end = ((end & align) + cpuinfo.icache_line); +static void __invalidate_dcache_all_msr_irq(void) +{ + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); - local_irq_save(flags); - __disable_icache(); + local_irq_save(flags); + __disable_dcache_msr(); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif + __enable_dcache_msr(); + local_irq_restore(flags); +} - for (i = start; i < end; i += cpuinfo.icache_line) - __invalidate_icache(i); +static void __invalidate_dcache_all_nomsr_irq(void) +{ + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); - __enable_icache(); - local_irq_restore(flags); - } + local_irq_save(flags); + __disable_dcache_nomsr(); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif + __enable_dcache_nomsr(); + local_irq_restore(flags); } -void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page) +static void __invalidate_dcache_all_noirq_wt(void) { - __invalidate_icache_all(); +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif } -void __invalidate_icache_user_range(struct 
vm_area_struct *vma, - struct page *page, unsigned long adr, - int len) +/* FIXME this is weird - should be only wdc but not work + * MS: I am getting bus errors and other weird things */ +static void __invalidate_dcache_all_wb(void) { - __invalidate_icache_all(); +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); +#ifdef ASM_LOOP + CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, + wdc.clear) +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc.clear %0, r0;" \ + : : "r" (i)); +#endif } -void __invalidate_cache_sigtramp(unsigned long addr) +static void __invalidate_dcache_range_wb(unsigned long start, + unsigned long end) { - __invalidate_icache_range(addr, addr + 8); +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + + CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc.clear %0, r0;" \ + : : "r" (i)); +#endif } -void __invalidate_dcache_all(void) +static void __invalidate_dcache_range_nomsr_wt(unsigned long start, + unsigned long end) { - unsigned int i; - unsigned flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); - if (cpuinfo.use_dcache) { - local_irq_save(flags); - __disable_dcache(); +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif +} - /* - * Just loop through cache size and invalidate, - * no need to add CACHE_BASE address - */ - for (i = 0; i < cpuinfo.dcache_size; - i += cpuinfo.dcache_line) - __invalidate_dcache(i); +static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, + unsigned long end) +{ + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); - __enable_dcache(); - local_irq_restore(flags); - } + local_irq_save(flags); + __disable_dcache_msr(); + +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif + + __enable_dcache_msr(); + local_irq_restore(flags); } -void __invalidate_dcache_range(unsigned long start, unsigned long end) +static void __invalidate_dcache_range_nomsr_irq(unsigned long start, + unsigned long end) { - unsigned int i; - unsigned flags; - unsigned int align; + unsigned long flags; +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); - if (cpuinfo.use_dcache) { - /* - * No need to cover entire cache range, - * just cover cache footprint - */ - end = min(start + cpuinfo.dcache_size, end); - align = ~(cpuinfo.dcache_line - 1); - start &= align; /* Make sure we are aligned */ - /* Push end up to the next cache line */ - end = ((end & align) + cpuinfo.dcache_line); - local_irq_save(flags); - __disable_dcache(); + 
CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); - for (i = start; i < end; i += cpuinfo.dcache_line) - __invalidate_dcache(i); + local_irq_save(flags); + __disable_dcache_nomsr(); - __enable_dcache(); - local_irq_restore(flags); - } +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif + + __enable_dcache_nomsr(); + local_irq_restore(flags); +} + +static void __flush_dcache_all_wb(void) +{ +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s\n", __func__); +#ifdef ASM_LOOP + CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, + wdc.flush); +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc.flush %0, r0;" \ + : : "r" (i)); +#endif } -void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page) +static void __flush_dcache_range_wb(unsigned long start, unsigned long end) { - __invalidate_dcache_all(); +#ifndef ASM_LOOP + int i; +#endif + pr_debug("%s: start 0x%x, end 0x%x\n", __func__, + (unsigned int)start, (unsigned int) end); + + CACHE_LOOP_LIMITS(start, end, + cpuinfo.dcache_line_length, cpuinfo.dcache_size); +#ifdef ASM_LOOP + CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc.flush %0, r0;" \ + : : "r" (i)); +#endif } -void __invalidate_dcache_user_range(struct vm_area_struct *vma, - struct page *page, unsigned long adr, - int len) +/* struct for wb caches and for wt caches */ +struct scache *mbc; + +/* new wb cache model */ +const struct scache wb_msr = { + .ie = __enable_icache_msr, + .id = __disable_icache_msr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_msr, + .dd = __disable_dcache_msr, + .dfl = __flush_dcache_all_wb, + .dflr = __flush_dcache_range_wb, + .din = __invalidate_dcache_all_wb, + .dinr = __invalidate_dcache_range_wb, +}; + +/* There is only difference in ie, id, de, dd functions */ +const struct scache wb_nomsr = { + .ie = __enable_icache_nomsr, + .id = __disable_icache_nomsr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_nomsr, + .dd = __disable_dcache_nomsr, + .dfl = __flush_dcache_all_wb, + .dflr = __flush_dcache_range_wb, + .din = __invalidate_dcache_all_wb, + .dinr = __invalidate_dcache_range_wb, +}; + +/* Old wt cache model with disabling irq and turn off cache */ +const struct scache wt_msr = { + .ie = __enable_icache_msr, + .id = __disable_icache_msr, + .ifl = __flush_icache_all_msr_irq, + .iflr = __flush_icache_range_msr_irq, + .iin = __flush_icache_all_msr_irq, + .iinr = __flush_icache_range_msr_irq, + .de = __enable_dcache_msr, + .dd = __disable_dcache_msr, + .dfl = __invalidate_dcache_all_msr_irq, + .dflr = __invalidate_dcache_range_msr_irq_wt, + .din = __invalidate_dcache_all_msr_irq, + .dinr = __invalidate_dcache_range_msr_irq_wt, +}; + +const struct scache wt_nomsr = { + .ie = __enable_icache_nomsr, + .id = __disable_icache_nomsr, + .ifl = __flush_icache_all_nomsr_irq, + .iflr = __flush_icache_range_nomsr_irq, + .iin = __flush_icache_all_nomsr_irq, + .iinr = __flush_icache_range_nomsr_irq, + .de = __enable_dcache_nomsr, + 
.dd = __disable_dcache_nomsr, + .dfl = __invalidate_dcache_all_nomsr_irq, + .dflr = __invalidate_dcache_range_nomsr_irq, + .din = __invalidate_dcache_all_nomsr_irq, + .dinr = __invalidate_dcache_range_nomsr_irq, +}; + +/* New wt cache model for newer Microblaze versions */ +const struct scache wt_msr_noirq = { + .ie = __enable_icache_msr, + .id = __disable_icache_msr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_msr, + .dd = __disable_dcache_msr, + .dfl = __invalidate_dcache_all_noirq_wt, + .dflr = __invalidate_dcache_range_nomsr_wt, + .din = __invalidate_dcache_all_noirq_wt, + .dinr = __invalidate_dcache_range_nomsr_wt, +}; + +const struct scache wt_nomsr_noirq = { + .ie = __enable_icache_nomsr, + .id = __disable_icache_nomsr, + .ifl = __flush_icache_all_noirq, + .iflr = __flush_icache_range_noirq, + .iin = __flush_icache_all_noirq, + .iinr = __flush_icache_range_noirq, + .de = __enable_dcache_nomsr, + .dd = __disable_dcache_nomsr, + .dfl = __invalidate_dcache_all_noirq_wt, + .dflr = __invalidate_dcache_range_nomsr_wt, + .din = __invalidate_dcache_all_noirq_wt, + .dinr = __invalidate_dcache_range_nomsr_wt, +}; + +/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */ +#define CPUVER_7_20_A 0x0c +#define CPUVER_7_20_D 0x0f + +#define INFO(s) printk(KERN_INFO "cache: " s "\n"); + +void microblaze_cache_init(void) { - __invalidate_dcache_all(); + if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) { + if (cpuinfo.dcache_wb) { + INFO("wb_msr"); + mbc = (struct scache *)&wb_msr; + if (cpuinfo.ver_code < CPUVER_7_20_D) { + /* MS: problem with signal handling - hw bug */ + INFO("WB won't work properly"); + } + } else { + if (cpuinfo.ver_code >= CPUVER_7_20_A) { + INFO("wt_msr_noirq"); + mbc = (struct scache *)&wt_msr_noirq; + } else { + INFO("wt_msr"); + mbc = (struct scache *)&wt_msr; + } + } + } else { + if (cpuinfo.dcache_wb) { + INFO("wb_nomsr"); + mbc = (struct scache *)&wb_nomsr; + if (cpuinfo.ver_code < CPUVER_7_20_D) { + /* MS: problem with signal handling - hw bug */ + INFO("WB won't work properly"); + } + } else { + if (cpuinfo.ver_code >= CPUVER_7_20_A) { + INFO("wt_nomsr_noirq"); + mbc = (struct scache *)&wt_nomsr_noirq; + } else { + INFO("wt_nomsr"); + mbc = (struct scache *)&wt_nomsr; + } + } + } + invalidate_dcache(); + enable_dcache(); + + invalidate_icache(); + enable_icache(); } diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c index c259786e7faa..f72dbd66c844 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c +++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c @@ -21,8 +21,14 @@ */ #define CI(c, p) { ci->c = PVR_##p(pvr); } + +#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE) #define err_printk(x) \ early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n"); +#else +#define err_printk(x) \ + printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n"); +#endif void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) { @@ -70,7 +76,7 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) CI(use_icache, USE_ICACHE); CI(icache_tagbits, ICACHE_ADDR_TAG_BITS); CI(icache_write, ICACHE_ALLOW_WR); - CI(icache_line, ICACHE_LINE_LEN); + ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2; CI(icache_size, ICACHE_BYTE_SIZE); CI(icache_base, ICACHE_BASEADDR); CI(icache_high, ICACHE_HIGHADDR); @@ -78,11 +84,16 
@@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) CI(use_dcache, USE_DCACHE); CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS); CI(dcache_write, DCACHE_ALLOW_WR); - CI(dcache_line, DCACHE_LINE_LEN); + ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2; CI(dcache_size, DCACHE_BYTE_SIZE); CI(dcache_base, DCACHE_BASEADDR); CI(dcache_high, DCACHE_HIGHADDR); + temp = PVR_DCACHE_USE_WRITEBACK(pvr); + if (ci->dcache_wb != temp) + err_printk("DCACHE WB"); + ci->dcache_wb = temp; + CI(use_dopb, D_OPB); CI(use_iopb, I_OPB); CI(use_dlmb, D_LMB); diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c index adb448f93d5f..6095aa6b5c88 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo-static.c +++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c @@ -72,12 +72,12 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) ci->use_icache = fcpu(cpu, "xlnx,use-icache"); ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits"); ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr"); - ci->icache_line = fcpu(cpu, "xlnx,icache-line-len") << 2; - if (!ci->icache_line) { + ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2; + if (!ci->icache_line_length) { if (fcpu(cpu, "xlnx,icache-use-fsl")) - ci->icache_line = 4 << 2; + ci->icache_line_length = 4 << 2; else - ci->icache_line = 1 << 2; + ci->icache_line_length = 1 << 2; } ci->icache_size = fcpu(cpu, "i-cache-size"); ci->icache_base = fcpu(cpu, "i-cache-baseaddr"); @@ -86,16 +86,17 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) ci->use_dcache = fcpu(cpu, "xlnx,use-dcache"); ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag"); ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr"); - ci->dcache_line = fcpu(cpu, "xlnx,dcache-line-len") << 2; - if (!ci->dcache_line) { + ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2; + if (!ci->dcache_line_length) { if (fcpu(cpu, "xlnx,dcache-use-fsl")) - ci->dcache_line = 4 << 2; + ci->dcache_line_length = 4 << 2; else - ci->dcache_line = 1 << 2; + ci->dcache_line_length = 1 << 2; } ci->dcache_size = fcpu(cpu, "d-cache-size"); ci->dcache_base = fcpu(cpu, "d-cache-baseaddr"); ci->dcache_high = fcpu(cpu, "d-cache-highaddr"); + ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback"); ci->use_dopb = fcpu(cpu, "xlnx,d-opb"); ci->use_iopb = fcpu(cpu, "xlnx,i-opb"); diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c index 3539babc1c18..991d71311b0e 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/arch/microblaze/kernel/cpu/cpuinfo.c @@ -29,11 +29,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = { {"7.20.a", 0x0c}, {"7.20.b", 0x0d}, {"7.20.c", 0x0e}, - /* FIXME There is no keycode defined in MBV for these versions */ - {"2.10.a", 0x10}, - {"3.00.a", 0x20}, - {"4.00.a", 0x30}, - {"4.00.b", 0x40}, + {"7.20.d", 0x0f}, + {"7.30.a", 0x10}, {NULL, 0}, }; diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c index 4dcfccdbc364..0c912b2a8e03 100644 --- a/arch/microblaze/kernel/cpu/mb.c +++ b/arch/microblaze/kernel/cpu/mb.c @@ -103,11 +103,15 @@ static int show_cpuinfo(struct seq_file *m, void *v) else count += seq_printf(m, "Icache:\t\tno\n"); - if (cpuinfo.use_dcache) + if (cpuinfo.use_dcache) { count += seq_printf(m, "Dcache:\t\t%ukB\n", cpuinfo.dcache_size >> 10); - else + if (cpuinfo.dcache_wb) + count += seq_printf(m, "\t\twrite-back\n"); + else + count += seq_printf(m, "\t\twrite-through\n"); + } else count += seq_printf(m, 
"Dcache:\t\tno\n"); count += seq_printf(m, diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c index c9a4340ddd53..9bee9382bf74 100644 --- a/arch/microblaze/kernel/cpu/pvr.c +++ b/arch/microblaze/kernel/cpu/pvr.c @@ -45,7 +45,7 @@ int cpu_has_pvr(void) { - unsigned flags; + unsigned long flags; unsigned pvr0; local_save_flags(flags); diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c new file mode 100644 index 000000000000..b1084974fccd --- /dev/null +++ b/arch/microblaze/kernel/dma.c @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2009-2010 PetaLogix + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation + * + * Provide default implementations of the DMA mapping callbacks for + * directly mapped busses. + */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dma-debug.h> +#include <asm/bug.h> +#include <asm/cacheflush.h> + +/* + * Generic direct DMA implementation + * + * This implementation supports a per-device offset that can be applied if + * the address at which memory is visible to devices is not 0. Platform code + * can set archdata.dma_data to an unsigned long holding the offset. By + * default the offset is PCI_DRAM_OFFSET. + */ +static inline void __dma_sync_page(unsigned long paddr, unsigned long offset, + size_t size, enum dma_data_direction direction) +{ + switch (direction) { + case DMA_TO_DEVICE: + flush_dcache_range(paddr + offset, paddr + offset + size); + break; + case DMA_FROM_DEVICE: + invalidate_dcache_range(paddr + offset, paddr + offset + size); + break; + default: + BUG(); + } +} + +static unsigned long get_dma_direct_offset(struct device *dev) +{ + if (dev) + return (unsigned long)dev->archdata.dma_data; + + return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ +} + +#define NOT_COHERENT_CACHE + +static void *dma_direct_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ +#ifdef NOT_COHERENT_CACHE + return consistent_alloc(flag, size, dma_handle); +#else + void *ret; + struct page *page; + int node = dev_to_node(dev); + + /* ignore region specifiers */ + flag &= ~(__GFP_HIGHMEM); + + page = alloc_pages_node(node, flag, get_order(size)); + if (page == NULL) + return NULL; + ret = page_address(page); + memset(ret, 0, size); + *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); + + return ret; +#endif +} + +static void dma_direct_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ +#ifdef NOT_COHERENT_CACHE + consistent_free(vaddr); +#else + free_pages((unsigned long)vaddr, get_order(size)); +#endif +} + +static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + struct scatterlist *sg; + int i; + + /* FIXME this part of code is untested */ + for_each_sg(sgl, sg, nents, i) { + sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); + sg->dma_length = sg->length; + __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset, + sg->length, direction); + } + + return nents; +} + +static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction, + struct dma_attrs *attrs) +{ +} + +static int dma_direct_dma_supported(struct device *dev, u64 mask) +{ + return 1; +} + +static inline dma_addr_t dma_direct_map_page(struct device *dev, + struct page *page, + unsigned long offset, + size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + 
__dma_sync_page(page_to_phys(page), offset, size, direction); + return page_to_phys(page) + offset + get_dma_direct_offset(dev); +} + +static inline void dma_direct_unmap_page(struct device *dev, + dma_addr_t dma_address, + size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ +/* There is not necessary to do cache cleanup + * + * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and + * dma_address is physical address + */ + __dma_sync_page(dma_address, 0 , size, direction); +} + +struct dma_map_ops dma_direct_ops = { + .alloc_coherent = dma_direct_alloc_coherent, + .free_coherent = dma_direct_free_coherent, + .map_sg = dma_direct_map_sg, + .unmap_sg = dma_direct_unmap_sg, + .dma_supported = dma_direct_dma_supported, + .map_page = dma_direct_map_page, + .unmap_page = dma_direct_unmap_page, +}; +EXPORT_SYMBOL(dma_direct_ops); + +/* Number of entries preallocated for DMA-API debugging */ +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) + +static int __init dma_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + + return 0; +} +fs_initcall(dma_init); diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S index 9083d85376a4..391d6197fc3b 100644 --- a/arch/microblaze/kernel/entry-nommu.S +++ b/arch/microblaze/kernel/entry-nommu.S @@ -122,7 +122,7 @@ ENTRY(_interrupt) ret_from_intr: lwi r11, r1, PT_MODE - bneid r11, 3f + bneid r11, no_intr_resched lwi r6, r31, TS_THREAD_INFO /* get thread info */ lwi r19, r6, TI_FLAGS /* get flags in thread info */ @@ -133,16 +133,18 @@ ret_from_intr: bralid r15, schedule nop 1: andi r11, r19, _TIF_SIGPENDING - beqid r11, no_intr_reshed + beqid r11, no_intr_resched addk r5, r1, r0 addk r7, r0, r0 bralid r15, do_signal addk r6, r0, r0 -no_intr_reshed: +no_intr_resched: + /* Disable interrupts, we are now committed to the state restore */ + disable_irq + /* save mode indicator */ lwi r11, r1, PT_MODE -3: swi r11, r0, PER_CPU(KM) /* save r31 */ @@ -208,8 +210,6 @@ ENTRY(_user_exception) lwi r1, r1, TS_THREAD_INFO /* get the thread info */ /* calculate kernel stack pointer */ addik r1, r1, THREAD_SIZE - PT_SIZE - swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ - lwi r11, r0, PER_CPU(KM) /* load mode indicator */ 2: swi r11, r1, PT_MODE /* store the mode */ lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index acc1f05d1e2c..c0ede25c5b99 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -31,6 +31,8 @@ #include <linux/errno.h> #include <asm/signal.h> +#undef DEBUG + /* The size of a state save frame. */ #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) @@ -303,7 +305,7 @@ C_ENTRY(_user_exception): swi r11, r1, PTO+PT_R1; /* Store user SP. */ addi r11, r0, 1; swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ -2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* Save away the syscall number. 
*/ swi r12, r1, PTO+PT_R0; tovirt(r1,r1) @@ -320,8 +322,7 @@ C_ENTRY(_user_exception): rtid r11, 0 nop 3: - add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO /* get thread info */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ lwi r11, r11, TI_FLAGS /* get flags in thread info */ andi r11, r11, _TIF_WORK_SYSCALL_MASK beqi r11, 4f @@ -352,10 +353,12 @@ C_ENTRY(_user_exception): add r12, r12, r12; /* convert num -> ptr */ add r12, r12, r12; +#ifdef DEBUG /* Trac syscalls and stored them to r0_ram */ lwi r3, r12, 0x400 + r0_ram addi r3, r3, 1 swi r3, r12, 0x400 + r0_ram +#endif # Find and jump into the syscall handler. lwi r12, r12, sys_call_table @@ -378,60 +381,50 @@ C_ENTRY(ret_from_trap): /* See if returning to kernel mode, if so, skip resched &c. */ bnei r11, 2f; + swi r3, r1, PTO + PT_R3 + swi r4, r1, PTO + PT_R4 + /* We're returning to user mode, so check for various conditions that * trigger rescheduling. */ - # FIXME: Restructure all these flag checks. - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + /* FIXME: Restructure all these flag checks. */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_WORK_SYSCALL_MASK beqi r11, 1f - swi r3, r1, PTO + PT_R3 - swi r4, r1, PTO + PT_R4 brlid r15, do_syscall_trace_leave addik r5, r1, PTO + PT_R0 - lwi r3, r1, PTO + PT_R3 - lwi r4, r1, PTO + PT_R4 1: - /* We're returning to user mode, so check for various conditions that * trigger rescheduling. */ - /* Get current task ptr into r11 */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + /* get thread info from current task */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f; - swi r3, r1, PTO + PT_R3; /* store syscall result */ - swi r4, r1, PTO + PT_R4; bralid r15, schedule; /* Call scheduler */ nop; /* delay slot */ - lwi r3, r1, PTO + PT_R3; /* restore syscall result */ - lwi r4, r1, PTO + PT_R4; /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ +5: /* get thread info from current task*/ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ - swi r3, r1, PTO + PT_R3; /* store syscall result */ - swi r4, r1, PTO + PT_R4; la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ addi r7, r0, 1; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ - nop; + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ + +/* Finally, return to user state. */ +1: lwi r3, r1, PTO + PT_R3; /* restore syscall result */ lwi r4, r1, PTO + PT_R4; -/* Finally, return to user state. */ -1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ + swi r0, r0, PER_CPU(KM); /* Now officially in user state. 
*/ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ VM_OFF; tophys(r1,r1); RESTORE_REGS; @@ -496,17 +489,6 @@ C_ENTRY(sys_execve): brid microblaze_execve; /* Do real work (tail-call).*/ nop; -C_ENTRY(sys_rt_sigsuspend_wrapper): - swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ - swi r4, r1, PTO+PT_R4; - la r7, r1, PTO; /* add user context as 3rd arg */ - brlid r15, sys_rt_sigsuspend; /* Do real work.*/ - nop; - lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ - lwi r4, r1, PTO+PT_R4; - bri ret_from_trap /* fall through will not work here due to align */ - nop; - C_ENTRY(sys_rt_sigreturn_wrapper): swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ swi r4, r1, PTO+PT_R4; @@ -572,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper): swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ addi r11, r0, 1; \ swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ -2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \ /* Save away the syscall number. */ \ swi r0, r1, PTO+PT_R0; \ tovirt(r1,r1) @@ -592,6 +574,8 @@ C_ENTRY(full_exception_trap): nop mfs r7, rfsr; /* save FSR */ nop + mts rfsr, r0; /* Clear sticky fsr */ + nop la r12, r0, full_exception set_vms; rtbd r12, 0; @@ -678,9 +662,7 @@ C_ENTRY(ret_from_exc): /* We're returning to user mode, so check for various conditions that trigger rescheduling. */ - /* Get current task ptr into r11 */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f; @@ -690,8 +672,7 @@ C_ENTRY(ret_from_exc): nop; /* delay slot */ /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ +5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ @@ -709,20 +690,14 @@ C_ENTRY(ret_from_exc): * (in a possibly modified form) after do_signal returns. * store return registers separately because this macros is use * for others exceptions */ - swi r3, r1, PTO + PT_R3; - swi r4, r1, PTO + PT_R4; la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ addi r7, r0, 0; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ - nop; - lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ - lwi r4, r1, PTO+PT_R4; + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ /* Finally, return to user state. */ 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. 
*/ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ VM_OFF; tophys(r1,r1); @@ -811,7 +786,7 @@ C_ENTRY(_interrupt): swi r11, r0, TOPHYS(PER_CPU(KM)); 2: - lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); + lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); swi r0, r1, PTO + PT_R0; tovirt(r1,r1) la r5, r1, PTO; @@ -826,8 +801,7 @@ ret_from_irq: lwi r11, r1, PTO + PT_MODE; bnei r11, 2f; - add r11, r0, CURRENT_TASK; - lwi r11, r11, TS_THREAD_INFO; + lwi r11, CURRENT_TASK, TS_THREAD_INFO; lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f @@ -835,8 +809,7 @@ ret_from_irq: nop; /* delay slot */ /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; - lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */ +5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqid r11, no_intr_resched @@ -851,8 +824,7 @@ no_intr_resched: /* Disable interrupts, we are now committed to the state restore */ disable_irq swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ - add r11, r0, CURRENT_TASK; - swi r11, r0, PER_CPU(CURRENT_SAVE); + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); VM_OFF; tophys(r1,r1); lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ @@ -862,7 +834,28 @@ no_intr_resched: lwi r1, r1, PT_R1 - PT_SIZE; bri 6f; /* MS: Return to kernel state. */ -2: VM_OFF /* MS: turn off MMU */ +2: +#ifdef CONFIG_PREEMPT + lwi r11, CURRENT_TASK, TS_THREAD_INFO; + /* MS: get preempt_count from thread info */ + lwi r5, r11, TI_PREEMPT_COUNT; + bgti r5, restore; + + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ + andi r5, r5, _TIF_NEED_RESCHED; + beqi r5, restore /* if zero jump over */ + +preempt: + /* interrupts are off that's why I am calling preempt_chedule_irq */ + bralid r15, preempt_schedule_irq + nop + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ + andi r5, r5, _TIF_NEED_RESCHED; + bnei r5, preempt /* if non zero jump to resched */ +restore: +#endif + VM_OFF /* MS: turn off MMU */ tophys(r1,r1) lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ lwi r4, r1, PTO + PT_R4; @@ -924,7 +917,7 @@ C_ENTRY(_debug_exception): swi r11, r1, PTO+PT_R1; /* Store user SP. */ addi r11, r0, 1; swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ -2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* Save away the syscall number. */ swi r0, r1, PTO+PT_R0; tovirt(r1,r1) @@ -944,8 +937,7 @@ dbtrap_call: rtbd r11, 0; bnei r11, 2f; /* Get current task ptr into r11 */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f; @@ -958,8 +950,7 @@ dbtrap_call: rtbd r11, 0; /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. 
*/ /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ +5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ @@ -975,16 +966,14 @@ dbtrap_call: rtbd r11, 0; (in a possibly modified form) after do_signal returns. */ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ addi r7, r0, 0; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ - nop; + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ /* Finally, return to user state. */ 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ VM_OFF; tophys(r1,r1); @@ -1016,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */ ENTRY(_switch_to) /* prepare return value */ - addk r3, r0, r31 + addk r3, r0, CURRENT_TASK /* save registers in cpu_context */ /* use r11 and r12, volatile registers, as temp register */ @@ -1060,10 +1049,10 @@ ENTRY(_switch_to) nop swi r12, r11, CC_FSR - /* update r31, the current */ - lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ + /* update r31, the current-give me pointer to task which will be next */ + lwi CURRENT_TASK, r6, TI_TASK /* stored it to current_save too */ - swi r31, r0, PER_CPU(CURRENT_SAVE) + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) /* get new process' cpu context and restore */ /* give me start where start context of next task */ diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c new file mode 100644 index 000000000000..388b31ca65a1 --- /dev/null +++ b/arch/microblaze/kernel/ftrace.c @@ -0,0 +1,237 @@ +/* + * Ftrace support for Microblaze. + * + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * Based on MIPS and PowerPC ftrace code + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <asm/cacheflush.h> +#include <linux/ftrace.h> + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +{ + unsigned long old; + int faulted, err; + struct ftrace_graph_ent trace; + unsigned long return_hooker = (unsigned long) + &return_to_handler; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Protect against fault, even if it shouldn't + * happen. This tool is too much intrusive to + * ignore such a protection. 
+ */ + asm volatile(" 1: lwi %0, %2, 0; \ + 2: swi %3, %2, 0; \ + addik %1, r0, 0; \ + 3: \ + .section .fixup, \"ax\"; \ + 4: brid 3b; \ + addik %1, r0, 1; \ + .previous; \ + .section __ex_table,\"a\"; \ + .word 1b,4b; \ + .word 2b,4b; \ + .previous;" \ + : "=&r" (old), "=r" (faulted) + : "r" (parent), "r" (return_hooker) + ); + + if (unlikely(faulted)) { + ftrace_graph_stop(); + WARN_ON(1); + return; + } + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); + if (err == -EBUSY) { + *parent = old; + return; + } + + trace.func = self_addr; + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + *parent = old; + } +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_DYNAMIC_FTRACE +/* save value to addr - it is save to do it in asm */ +static int ftrace_modify_code(unsigned long addr, unsigned int value) +{ + int faulted = 0; + + __asm__ __volatile__(" 1: swi %2, %1, 0; \ + addik %0, r0, 0; \ + 2: \ + .section .fixup, \"ax\"; \ + 3: brid 2b; \ + addik %0, r0, 1; \ + .previous; \ + .section __ex_table,\"a\"; \ + .word 1b,3b; \ + .previous;" \ + : "=r" (faulted) + : "r" (addr), "r" (value) + ); + + if (unlikely(faulted)) + return -EFAULT; + + return 0; +} + +#define MICROBLAZE_NOP 0x80000000 +#define MICROBLAZE_BRI 0xb800000C + +static unsigned int recorded; /* if save was or not */ +static unsigned int imm; /* saving whole imm instruction */ + +/* There are two approaches howto solve ftrace_make nop function - look below */ +#undef USE_FTRACE_NOP + +#ifdef USE_FTRACE_NOP +static unsigned int bralid; /* saving whole bralid instruction */ +#endif + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + /* we have this part of code which we are working with + * b000c000 imm -16384 + * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> + * 80000000 or r0, r0, r0 + * + * The first solution (!USE_FTRACE_NOP-could be called branch solution) + * b000c000 bri 12 (0xC - jump to any other instruction) + * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> + * 80000000 or r0, r0, r0 + * any other instruction + * + * The second solution (USE_FTRACE_NOP) - no jump just nops + * 80000000 or r0, r0, r0 + * 80000000 or r0, r0, r0 + * 80000000 or r0, r0, r0 + */ + int ret = 0; + + if (recorded == 0) { + recorded = 1; + imm = *(unsigned int *)rec->ip; + pr_debug("%s: imm:0x%x\n", __func__, imm); +#ifdef USE_FTRACE_NOP + bralid = *(unsigned int *)(rec->ip + 4); + pr_debug("%s: bralid 0x%x\n", __func__, bralid); +#endif /* USE_FTRACE_NOP */ + } + +#ifdef USE_FTRACE_NOP + ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP); + ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); +#else /* USE_FTRACE_NOP */ + ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI); +#endif /* USE_FTRACE_NOP */ + return ret; +} + +static int ret_addr; /* initialized as 0 by default */ + +/* I believe that first is called ftrace_make_nop before this function */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + int ret; + ret_addr = addr; /* saving where the barrier jump is */ + pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", + __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); + ret = ftrace_modify_code(rec->ip, imm); +#ifdef USE_FTRACE_NOP + pr_debug("%s: bralid:0x%x\n", __func__, bralid); + ret += ftrace_modify_code(rec->ip + 4, bralid); +#endif /* USE_FTRACE_NOP */ + return ret; +} + +int __init ftrace_dyn_arch_init(void *data) +{ + /* The return code is retured via data */ + 
*(unsigned long *)data = 0; + + return 0; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + unsigned int upper = (unsigned int)func; + unsigned int lower = (unsigned int)func; + int ret = 0; + + /* create proper saving to ftrace_call poll */ + upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ + lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ + + pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", + __func__, (unsigned int)func, (unsigned int)ip, upper, lower); + + /* save upper and lower code */ + ret = ftrace_modify_code(ip, upper); + ret += ftrace_modify_code(ip + 4, lower); + + /* We just need to remove the rtsd r15, 8 by NOP */ + BUG_ON(!ret_addr); + if (ret_addr) + ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP); + else + ret = 1; /* fault */ + + /* All changes are done - lets do caches consistent */ + flush_icache(); + return ret; +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +unsigned int old_jump; /* saving place for jump instruction */ + +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned int ret; + unsigned long ip = (unsigned long)(&ftrace_call_graph); + + old_jump = *(unsigned int *)ip; /* save jump over instruction */ + ret = ftrace_modify_code(ip, MICROBLAZE_NOP); + flush_icache(); + + pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); + return ret; +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned int ret; + unsigned long ip = (unsigned long)(&ftrace_call_graph); + + ret = ftrace_modify_code(ip, old_jump); + flush_icache(); + + pr_debug("%s\n", __func__); + return ret; +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 697ce3007f30..cb7815cfe5ab 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -31,7 +31,7 @@ #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/page.h> -#include <asm/prom.h> /* for OF_DT_HEADER */ +#include <linux/of_fdt.h> /* for OF_DT_HEADER */ #ifdef CONFIG_MMU #include <asm/setup.h> /* COMMAND_LINE_SIZE */ @@ -99,8 +99,8 @@ no_fdt_arg: tophys(r4,r4) /* convert to phys address */ ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ _copy_command_line: - lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ - sb r7, r4, r6 /* addr[r4+r6]= r7*/ + lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ + sb r2, r4, r6 /* addr[r4+r6]= r7*/ addik r6, r6, 1 /* increment counting */ bgtid r3, _copy_command_line /* loop for all entries */ addik r3, r3, -1 /* descrement loop */ @@ -136,6 +136,11 @@ _invalidate: addik r3, r3, -1 /* sync */ + /* Setup the kernel PID */ + mts rpid,r0 /* Load the kernel PID */ + nop + bri 4 + /* * We should still be executing code at physical address area * RAM_BASEADDR at this point. However, kernel code is at @@ -146,10 +151,6 @@ _invalidate: addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ tophys(r4,r3) /* Load the kernel physical address */ - mts rpid,r0 /* Load the kernel PID */ - nop - bri 4 - /* * Configure and load two entries into TLB slots 0 and 1. 
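For reference, ftrace_update_ftrace_func() above rewrites the two placeholder nops at ftrace_call with an imm/addik pair that loads the 32-bit tracer address into r20; MicroBlaze splits a 32-bit immediate across an imm prefix and the following instruction. A small sketch of the same encoding, using the opcode bases from the code above (illustrative helper, not part of the patch):

/*
 * Illustrative helper, not part of the patch. Encodes the two
 * instructions that ftrace_update_ftrace_func() patches in:
 *   0xb0000000 | upper16  ->  imm   func[31:16]
 *   0x32800000 | lower16  ->  addik r20, r0, func[15:0]
 */
static void encode_tracer_call(unsigned int func, unsigned int insn[2])
{
	insn[0] = 0xb0000000 | (func >> 16);	/* imm: upper half-word */
	insn[1] = 0x32800000 | (func & 0xffff);	/* addik r20, r0, lower half */
}

The two words are then written with ftrace_modify_code() and made visible with flush_icache(), exactly as in the function above.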
* In case we are pinning TLBs, these are reserved in by the diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c index 1bdf20222b92..522751737cfa 100644 --- a/arch/microblaze/kernel/heartbeat.c +++ b/arch/microblaze/kernel/heartbeat.c @@ -45,6 +45,7 @@ void heartbeat(void) void setup_heartbeat(void) { struct device_node *gpio = NULL; + int *prop; int j; char *gpio_list[] = { "xlnx,xps-gpio-1.00.a", @@ -58,10 +59,14 @@ void setup_heartbeat(void) break; } - base_addr = *(int *) of_get_property(gpio, "reg", NULL); - base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); - printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr); + if (gpio) { + base_addr = *(int *) of_get_property(gpio, "reg", NULL); + base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); + printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr); - if (*(int *) of_get_property(gpio, "xlnx,is-bidir", NULL)) - out_be32(base_addr + 4, 0); /* GPIO is configured as output */ + /* GPIO is configured as output */ + prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL); + if (prop) + out_be32(base_addr + 4, 0); + } } diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 6b0288ebccd6..2b86c03aa841 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S @@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */ addk r8, r17, r0; /* Load exception address */ bralid r15, full_exception; /* Branch to the handler */ nop; - mts r0, rfsr; /* Clear sticky fsr */ + mts rfsr, r0; /* Clear sticky fsr */ nop /* diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index 6eea6f92b84e..03172c1da770 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c @@ -42,8 +42,16 @@ unsigned int nr_irq; static void intc_enable_or_unmask(unsigned int irq) { + unsigned long mask = 1 << irq; pr_debug("enable_or_unmask: %d\n", irq); - out_be32(INTC_BASE + SIE, 1 << irq); + out_be32(INTC_BASE + SIE, mask); + + /* ack level irqs because they can't be acked during + * ack function since the handle_level_irq function + * acks the irq before calling the interrupt handler + */ + if (irq_desc[irq].status & IRQ_LEVEL) + out_be32(INTC_BASE + IAR, mask); } static void intc_disable_or_mask(unsigned int irq) diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 7d5ddd62d4d2..6f39e2c001f3 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < nr_irq) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -89,7 +89,22 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } + +/* MS: There is no any advance mapping mechanism. 
We are using simple 32bit + intc without any cascades or any connection that's why mapping is 1:1 */ +unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq) +{ + return hwirq; +} +EXPORT_SYMBOL_GPL(irq_create_mapping); + +unsigned int irq_create_of_mapping(struct device_node *controller, + u32 *intspec, unsigned int intsize) +{ + return intspec[0]; +} +EXPORT_SYMBOL_GPL(irq_create_of_mapping); diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S new file mode 100644 index 000000000000..e7eaa7a8cbd3 --- /dev/null +++ b/arch/microblaze/kernel/mcount.S @@ -0,0 +1,170 @@ +/* + * Low-level ftrace handling + * + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + */ + +#include <linux/linkage.h> + +#define NOALIGN_ENTRY(name) .globl name; name: + +/* FIXME MS: I think that I don't need to save all regs */ +#define SAVE_REGS \ + addik r1, r1, -120; \ + swi r2, r1, 4; \ + swi r3, r1, 8; \ + swi r4, r1, 12; \ + swi r5, r1, 116; \ + swi r6, r1, 16; \ + swi r7, r1, 20; \ + swi r8, r1, 24; \ + swi r9, r1, 28; \ + swi r10, r1, 32; \ + swi r11, r1, 36; \ + swi r12, r1, 40; \ + swi r13, r1, 44; \ + swi r14, r1, 48; \ + swi r16, r1, 52; \ + swi r17, r1, 56; \ + swi r18, r1, 60; \ + swi r19, r1, 64; \ + swi r20, r1, 68; \ + swi r21, r1, 72; \ + swi r22, r1, 76; \ + swi r23, r1, 80; \ + swi r24, r1, 84; \ + swi r25, r1, 88; \ + swi r26, r1, 92; \ + swi r27, r1, 96; \ + swi r28, r1, 100; \ + swi r29, r1, 104; \ + swi r30, r1, 108; \ + swi r31, r1, 112; + +#define RESTORE_REGS \ + lwi r2, r1, 4; \ + lwi r3, r1, 8; \ + lwi r4, r1, 12; \ + lwi r5, r1, 116; \ + lwi r6, r1, 16; \ + lwi r7, r1, 20; \ + lwi r8, r1, 24; \ + lwi r9, r1, 28; \ + lwi r10, r1, 32; \ + lwi r11, r1, 36; \ + lwi r12, r1, 40; \ + lwi r13, r1, 44; \ + lwi r14, r1, 48; \ + lwi r16, r1, 52; \ + lwi r17, r1, 56; \ + lwi r18, r1, 60; \ + lwi r19, r1, 64; \ + lwi r20, r1, 68; \ + lwi r21, r1, 72; \ + lwi r22, r1, 76; \ + lwi r23, r1, 80; \ + lwi r24, r1, 84; \ + lwi r25, r1, 88; \ + lwi r26, r1, 92; \ + lwi r27, r1, 96; \ + lwi r28, r1, 100; \ + lwi r29, r1, 104; \ + lwi r30, r1, 108; \ + lwi r31, r1, 112; \ + addik r1, r1, 120; + +ENTRY(ftrace_stub) + rtsd r15, 8; + nop; + +ENTRY(_mcount) +#ifdef CONFIG_DYNAMIC_FTRACE +ENTRY(ftrace_caller) + /* MS: It is just barrier which is removed from C code */ + rtsd r15, 8 + nop +#endif /* CONFIG_DYNAMIC_FTRACE */ + SAVE_REGS + swi r15, r1, 0; + /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */ + lwi r5, r0, function_trace_stop; + bneid r5, end; + nop; + /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifndef CONFIG_DYNAMIC_FTRACE + lwi r5, r0, ftrace_graph_return; + addik r6, r0, ftrace_stub; /* asm implementation */ + cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */ + beqid r5, end_graph_tracer; + nop; + + lwi r6, r0, ftrace_graph_entry; + addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */ + cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */ + beqid r5, end_graph_tracer; + nop; +#else /* CONFIG_DYNAMIC_FTRACE */ +NOALIGN_ENTRY(ftrace_call_graph) + /* MS: jump over graph function - replaced from C code */ + bri end_graph_tracer +#endif /* CONFIG_DYNAMIC_FTRACE */ + addik r5, r1, 120; /* MS: load parent addr */ + addik r6, r15, 0; /* MS: load current 
function addr */ + bralid r15, prepare_ftrace_return; + nop; + /* MS: graph was taken that's why - can jump over function trace */ + brid end; + nop; +end_graph_tracer: +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifndef CONFIG_DYNAMIC_FTRACE + /* MS: test function trace if is taken or not */ + lwi r20, r0, ftrace_trace_function; + addik r6, r0, ftrace_stub; + cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */ + beqid r5, end; /* MS: not taken -> jump over */ + nop; +#else /* CONFIG_DYNAMIC_FTRACE */ +NOALIGN_ENTRY(ftrace_call) +/* instruction for setup imm FUNC_part1, addik r20, r0, FUNC_part2 */ + nop + nop +#endif /* CONFIG_DYNAMIC_FTRACE */ +/* static normal trace */ + lwi r6, r1, 120; /* MS: load parent addr */ + addik r5, r15, 0; /* MS: load current function addr */ + /* MS: here is dependency on previous code */ + brald r15, r20; /* MS: jump to ftrace handler */ + nop; +end: + lwi r15, r1, 0; + RESTORE_REGS + + rtsd r15, 8; /* MS: jump back */ + nop; + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(return_to_handler) + nop; /* MS: just barrier for rtsd r15, 8 */ + nop; + SAVE_REGS + swi r15, r1, 0; + + /* MS: find out returning address */ + bralid r15, ftrace_return_to_handler; + nop; + + /* MS: return value from ftrace_return_to_handler is my returning addr + * must be before restore regs because I have to restore r3 content */ + addik r15, r3, 0; + RESTORE_REGS + + rtsd r15, 8; /* MS: jump back */ + nop; +#endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index 59ff20e33e0c..bc4dcb7d3861 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c @@ -18,6 +18,7 @@ #include <linux/io.h> #include <asm/page.h> #include <asm/system.h> +#include <linux/ftrace.h> #include <linux/uaccess.h> /* @@ -47,3 +48,7 @@ extern void __umodsi3(void); EXPORT_SYMBOL(__umodsi3); extern char *_ebss; EXPORT_SYMBOL_GPL(_ebss); +#ifdef CONFIG_FUNCTION_TRACER +extern void _mcount(void); +EXPORT_SYMBOL(_mcount); +#endif diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c index acf4574d0f18..1c6d684996d7 100644 --- a/arch/microblaze/kernel/of_platform.c +++ b/arch/microblaze/kernel/of_platform.c @@ -185,7 +185,7 @@ EXPORT_SYMBOL(of_find_device_by_node); static int of_dev_phandle_match(struct device *dev, void *data) { phandle *ph = data; - return to_of_device(dev)->node->linux_phandle == *ph; + return to_of_device(dev)->node->phandle == *ph; } struct of_device *of_find_device_by_phandle(phandle ph) diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 4201c743cc9f..812f1bf06c9e 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -15,6 +15,7 @@ #include <linux/bitops.h> #include <asm/system.h> #include <asm/pgalloc.h> +#include <asm/cacheflush.h> void show_regs(struct pt_regs *regs) { @@ -235,7 +236,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) regs->pc = pc; regs->r1 = usp; regs->pt_mode = 0; +#ifdef CONFIG_MMU regs->msr |= MSR_UMS; +#endif } #ifdef CONFIG_MMU diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index c005cc6f1aaf..a15ef6d67ca9 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c @@ -42,697 +42,20 @@ #include <asm/sections.h> #include <asm/pci-bridge.h> -static int __initdata dt_root_addr_cells; -static int __initdata dt_root_size_cells; - -typedef u32 cell_t; - -static 
struct boot_param_header *initial_boot_params; - -/* export that to outside world */ -struct device_node *of_chosen; - -static inline char *find_flat_dt_string(u32 offset) -{ - return ((char *)initial_boot_params) + - initial_boot_params->off_dt_strings + offset; -} - -/** - * This function is used to scan the flattened device-tree, it is - * used to extract the memory informations at boot before we can - * unflatten the tree - */ -int __init of_scan_flat_dt(int (*it)(unsigned long node, - const char *uname, int depth, - void *data), - void *data) -{ - unsigned long p = ((unsigned long)initial_boot_params) + - initial_boot_params->off_dt_struct; - int rc = 0; - int depth = -1; - - do { - u32 tag = *((u32 *)p); - char *pathp; - - p += 4; - if (tag == OF_DT_END_NODE) { - depth--; - continue; - } - if (tag == OF_DT_NOP) - continue; - if (tag == OF_DT_END) - break; - if (tag == OF_DT_PROP) { - u32 sz = *((u32 *)p); - p += 8; - if (initial_boot_params->version < 0x10) - p = _ALIGN(p, sz >= 8 ? 8 : 4); - p += sz; - p = _ALIGN(p, 4); - continue; - } - if (tag != OF_DT_BEGIN_NODE) { - printk(KERN_WARNING "Invalid tag %x scanning flattened" - " device tree !\n", tag); - return -EINVAL; - } - depth++; - pathp = (char *)p; - p = _ALIGN(p + strlen(pathp) + 1, 4); - if ((*pathp) == '/') { - char *lp, *np; - for (lp = NULL, np = pathp; *np; np++) - if ((*np) == '/') - lp = np+1; - if (lp != NULL) - pathp = lp; - } - rc = it(p, pathp, depth, data); - if (rc != 0) - break; - } while (1); - - return rc; -} - -unsigned long __init of_get_flat_dt_root(void) -{ - unsigned long p = ((unsigned long)initial_boot_params) + - initial_boot_params->off_dt_struct; - - while (*((u32 *)p) == OF_DT_NOP) - p += 4; - BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE); - p += 4; - return _ALIGN(p + strlen((char *)p) + 1, 4); -} - -/** - * This function can be used within scan_flattened_dt callback to get - * access to properties - */ -void *__init of_get_flat_dt_prop(unsigned long node, const char *name, - unsigned long *size) -{ - unsigned long p = node; - - do { - u32 tag = *((u32 *)p); - u32 sz, noff; - const char *nstr; - - p += 4; - if (tag == OF_DT_NOP) - continue; - if (tag != OF_DT_PROP) - return NULL; - - sz = *((u32 *)p); - noff = *((u32 *)(p + 4)); - p += 8; - if (initial_boot_params->version < 0x10) - p = _ALIGN(p, sz >= 8 ? 
8 : 4); - - nstr = find_flat_dt_string(noff); - if (nstr == NULL) { - printk(KERN_WARNING "Can't find property index" - " name !\n"); - return NULL; - } - if (strcmp(name, nstr) == 0) { - if (size) - *size = sz; - return (void *)p; - } - p += sz; - p = _ALIGN(p, 4); - } while (1); -} - -int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) -{ - const char *cp; - unsigned long cplen, l; - - cp = of_get_flat_dt_prop(node, "compatible", &cplen); - if (cp == NULL) - return 0; - while (cplen > 0) { - if (strncasecmp(cp, compat, strlen(compat)) == 0) - return 1; - l = strlen(cp) + 1; - cp += l; - cplen -= l; - } - - return 0; -} - -static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, - unsigned long align) -{ - void *res; - - *mem = _ALIGN(*mem, align); - res = (void *)*mem; - *mem += size; - - return res; -} - -static unsigned long __init unflatten_dt_node(unsigned long mem, - unsigned long *p, - struct device_node *dad, - struct device_node ***allnextpp, - unsigned long fpsize) -{ - struct device_node *np; - struct property *pp, **prev_pp = NULL; - char *pathp; - u32 tag; - unsigned int l, allocl; - int has_name = 0; - int new_format = 0; - - tag = *((u32 *)(*p)); - if (tag != OF_DT_BEGIN_NODE) { - printk("Weird tag at start of node: %x\n", tag); - return mem; - } - *p += 4; - pathp = (char *)*p; - l = allocl = strlen(pathp) + 1; - *p = _ALIGN(*p + l, 4); - - /* version 0x10 has a more compact unit name here instead of the full - * path. we accumulate the full path size using "fpsize", we'll rebuild - * it later. We detect this because the first character of the name is - * not '/'. - */ - if ((*pathp) != '/') { - new_format = 1; - if (fpsize == 0) { - /* root node: special case. fpsize accounts for path - * plus terminating zero. root node only has '/', so - * fpsize should be 2, but we want to avoid the first - * level nodes to have two '/' so we use fpsize 1 here - */ - fpsize = 1; - allocl = 2; - } else { - /* account for '/' and path size minus terminal 0 - * already in 'l' - */ - fpsize += l; - allocl = fpsize; - } - } - - np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, - __alignof__(struct device_node)); - if (allnextpp) { - memset(np, 0, sizeof(*np)); - np->full_name = ((char *)np) + sizeof(struct device_node); - if (new_format) { - char *p2 = np->full_name; - /* rebuild full path for new format */ - if (dad && dad->parent) { - strcpy(p2, dad->full_name); -#ifdef DEBUG - if ((strlen(p2) + l + 1) != allocl) { - pr_debug("%s: p: %d, l: %d, a: %d\n", - pathp, (int)strlen(p2), - l, allocl); - } -#endif - p2 += strlen(p2); - } - *(p2++) = '/'; - memcpy(p2, pathp, l); - } else - memcpy(np->full_name, pathp, l); - prev_pp = &np->properties; - **allnextpp = np; - *allnextpp = &np->allnext; - if (dad != NULL) { - np->parent = dad; - /* we temporarily use the next field as `last_child'*/ - if (dad->next == NULL) - dad->child = np; - else - dad->next->sibling = np; - dad->next = np; - } - kref_init(&np->kref); - } - while (1) { - u32 sz, noff; - char *pname; - - tag = *((u32 *)(*p)); - if (tag == OF_DT_NOP) { - *p += 4; - continue; - } - if (tag != OF_DT_PROP) - break; - *p += 4; - sz = *((u32 *)(*p)); - noff = *((u32 *)((*p) + 4)); - *p += 8; - if (initial_boot_params->version < 0x10) - *p = _ALIGN(*p, sz >= 8 ? 
8 : 4); - - pname = find_flat_dt_string(noff); - if (pname == NULL) { - printk(KERN_INFO - "Can't find property name in list !\n"); - break; - } - if (strcmp(pname, "name") == 0) - has_name = 1; - l = strlen(pname) + 1; - pp = unflatten_dt_alloc(&mem, sizeof(struct property), - __alignof__(struct property)); - if (allnextpp) { - if (strcmp(pname, "linux,phandle") == 0) { - np->node = *((u32 *)*p); - if (np->linux_phandle == 0) - np->linux_phandle = np->node; - } - if (strcmp(pname, "ibm,phandle") == 0) - np->linux_phandle = *((u32 *)*p); - pp->name = pname; - pp->length = sz; - pp->value = (void *)*p; - *prev_pp = pp; - prev_pp = &pp->next; - } - *p = _ALIGN((*p) + sz, 4); - } - /* with version 0x10 we may not have the name property, recreate - * it here from the unit name if absent - */ - if (!has_name) { - char *p1 = pathp, *ps = pathp, *pa = NULL; - int sz; - - while (*p1) { - if ((*p1) == '@') - pa = p1; - if ((*p1) == '/') - ps = p1 + 1; - p1++; - } - if (pa < ps) - pa = p1; - sz = (pa - ps) + 1; - pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, - __alignof__(struct property)); - if (allnextpp) { - pp->name = "name"; - pp->length = sz; - pp->value = pp + 1; - *prev_pp = pp; - prev_pp = &pp->next; - memcpy(pp->value, ps, sz - 1); - ((char *)pp->value)[sz - 1] = 0; - pr_debug("fixed up name for %s -> %s\n", pathp, - (char *)pp->value); - } - } - if (allnextpp) { - *prev_pp = NULL; - np->name = of_get_property(np, "name", NULL); - np->type = of_get_property(np, "device_type", NULL); - - if (!np->name) - np->name = "<NULL>"; - if (!np->type) - np->type = "<NULL>"; - } - while (tag == OF_DT_BEGIN_NODE) { - mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); - tag = *((u32 *)(*p)); - } - if (tag != OF_DT_END_NODE) { - printk(KERN_INFO "Weird tag at end of node: %x\n", tag); - return mem; - } - *p += 4; - return mem; -} - -/** - * unflattens the device-tree passed by the firmware, creating the - * tree of struct device_node. 
It also fills the "name" and "type" - * pointers of the nodes so the normal device-tree walking functions - * can be used (this used to be done by finish_device_tree) - */ -void __init unflatten_device_tree(void) -{ - unsigned long start, mem, size; - struct device_node **allnextp = &allnodes; - - pr_debug(" -> unflatten_device_tree()\n"); - - /* First pass, scan for size */ - start = ((unsigned long)initial_boot_params) + - initial_boot_params->off_dt_struct; - size = unflatten_dt_node(0, &start, NULL, NULL, 0); - size = (size | 3) + 1; - - pr_debug(" size is %lx, allocating...\n", size); - - /* Allocate memory for the expanded device tree */ - mem = lmb_alloc(size + 4, __alignof__(struct device_node)); - mem = (unsigned long) __va(mem); - - ((u32 *)mem)[size / 4] = 0xdeadbeef; - - pr_debug(" unflattening %lx...\n", mem); - - /* Second pass, do actual unflattening */ - start = ((unsigned long)initial_boot_params) + - initial_boot_params->off_dt_struct; - unflatten_dt_node(mem, &start, NULL, &allnextp, 0); - if (*((u32 *)start) != OF_DT_END) - printk(KERN_WARNING "Weird tag at end of tree: %08x\n", - *((u32 *)start)); - if (((u32 *)mem)[size / 4] != 0xdeadbeef) - printk(KERN_WARNING "End of tree marker overwritten: %08x\n", - ((u32 *)mem)[size / 4]); - *allnextp = NULL; - - /* Get pointer to OF "/chosen" node for use everywhere */ - of_chosen = of_find_node_by_path("/chosen"); - if (of_chosen == NULL) - of_chosen = of_find_node_by_path("/chosen@0"); - - pr_debug(" <- unflatten_device_tree()\n"); -} - -#define early_init_dt_scan_drconf_memory(node) 0 - -static int __init early_init_dt_scan_cpus(unsigned long node, - const char *uname, int depth, - void *data) -{ - static int logical_cpuid; - char *type = of_get_flat_dt_prop(node, "device_type", NULL); - const u32 *intserv; - int i, nthreads; - int found = 0; - - /* We are scanning "cpu" nodes only */ - if (type == NULL || strcmp(type, "cpu") != 0) - return 0; - - /* Get physical cpuid */ - intserv = of_get_flat_dt_prop(node, "reg", NULL); - nthreads = 1; - - /* - * Now see if any of these threads match our boot cpu. - * NOTE: This must match the parsing done in smp_setup_cpu_maps. - */ - for (i = 0; i < nthreads; i++) { - /* - * version 2 of the kexec param format adds the phys cpuid of - * booted proc. - */ - if (initial_boot_params && initial_boot_params->version >= 2) { - if (intserv[i] == - initial_boot_params->boot_cpuid_phys) { - found = 1; - break; - } - } else { - /* - * Check if it's the boot-cpu, set it's hw index now, - * unfortunately this format did not support booting - * off secondary threads. - */ - if (of_get_flat_dt_prop(node, - "linux,boot-cpu", NULL) != NULL) { - found = 1; - break; - } - } - -#ifdef CONFIG_SMP - /* logical cpu id is always 0 on UP kernels */ - logical_cpuid++; -#endif - } - - if (found) { - pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid, - intserv[i]); - boot_cpuid = logical_cpuid; - } - - return 0; -} - -#ifdef CONFIG_BLK_DEV_INITRD -static void __init early_init_dt_check_for_initrd(unsigned long node) -{ - unsigned long l; - u32 *prop; - - pr_debug("Looking for initrd properties... 
"); - - prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); - if (prop) { - initrd_start = (unsigned long) - __va((u32)of_read_ulong(prop, l/4)); - - prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); - if (prop) { - initrd_end = (unsigned long) - __va((u32)of_read_ulong(prop, 1/4)); - initrd_below_start_ok = 1; - } else { - initrd_start = 0; - } - } - - pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", - initrd_start, initrd_end); -} -#else -static inline void early_init_dt_check_for_initrd(unsigned long node) -{ -} -#endif /* CONFIG_BLK_DEV_INITRD */ - -static int __init early_init_dt_scan_chosen(unsigned long node, - const char *uname, int depth, void *data) -{ - unsigned long l; - char *p; - - pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); - - if (depth != 1 || - (strcmp(uname, "chosen") != 0 && - strcmp(uname, "chosen@0") != 0)) - return 0; - -#ifdef CONFIG_KEXEC - lprop = (u64 *)of_get_flat_dt_prop(node, - "linux,crashkernel-base", NULL); - if (lprop) - crashk_res.start = *lprop; - - lprop = (u64 *)of_get_flat_dt_prop(node, - "linux,crashkernel-size", NULL); - if (lprop) - crashk_res.end = crashk_res.start + *lprop - 1; -#endif - - early_init_dt_check_for_initrd(node); - - /* Retreive command line */ - p = of_get_flat_dt_prop(node, "bootargs", &l); - if (p != NULL && l > 0) - strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); - -#ifdef CONFIG_CMDLINE -#ifndef CONFIG_CMDLINE_FORCE - if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) -#endif - strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#endif /* CONFIG_CMDLINE */ - - pr_debug("Command line is: %s\n", cmd_line); - - /* break now */ - return 1; -} - -static int __init early_init_dt_scan_root(unsigned long node, - const char *uname, int depth, void *data) -{ - u32 *prop; - - if (depth != 0) - return 0; - - prop = of_get_flat_dt_prop(node, "#size-cells", NULL); - dt_root_size_cells = (prop == NULL) ? 1 : *prop; - pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); - - prop = of_get_flat_dt_prop(node, "#address-cells", NULL); - dt_root_addr_cells = (prop == NULL) ? 2 : *prop; - pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); - - /* break now */ - return 1; -} - -static u64 __init dt_mem_next_cell(int s, cell_t **cellp) -{ - cell_t *p = *cellp; - - *cellp = p + s; - return of_read_number(p, s); -} - -static int __init early_init_dt_scan_memory(unsigned long node, - const char *uname, int depth, void *data) +void __init early_init_dt_scan_chosen_arch(unsigned long node) { - char *type = of_get_flat_dt_prop(node, "device_type", NULL); - cell_t *reg, *endp; - unsigned long l; - - /* Look for the ibm,dynamic-reconfiguration-memory node */ -/* if (depth == 1 && - strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) - return early_init_dt_scan_drconf_memory(node); -*/ - /* We are scanning "memory" nodes only */ - if (type == NULL) { - /* - * The longtrail doesn't have a device_type on the - * /memory node, so look for the node called /memory@0. 
- */ - if (depth != 1 || strcmp(uname, "memory@0") != 0) - return 0; - } else if (strcmp(type, "memory") != 0) - return 0; - - reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l); - if (reg == NULL) - reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); - if (reg == NULL) - return 0; - - endp = reg + (l / sizeof(cell_t)); - - pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", - uname, l, reg[0], reg[1], reg[2], reg[3]); - - while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { - u64 base, size; - - base = dt_mem_next_cell(dt_root_addr_cells, ®); - size = dt_mem_next_cell(dt_root_size_cells, ®); - - if (size == 0) - continue; - pr_debug(" - %llx , %llx\n", (unsigned long long)base, - (unsigned long long)size); - - lmb_add(base, size); - } - return 0; + /* No Microblaze specific code here */ } -#ifdef CONFIG_PHYP_DUMP -/** - * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg - * - * Function to find the largest size we need to reserve - * during early boot process. - * - * It either looks for boot param and returns that OR - * returns larger of 256 or 5% rounded down to multiples of 256MB. - * - */ -static inline unsigned long phyp_dump_calculate_reserve_size(void) +void __init early_init_dt_add_memory_arch(u64 base, u64 size) { - unsigned long tmp; - - if (phyp_dump_info->reserve_bootvar) - return phyp_dump_info->reserve_bootvar; - - /* divide by 20 to get 5% of value */ - tmp = lmb_end_of_DRAM(); - do_div(tmp, 20); - - /* round it down in multiples of 256 */ - tmp = tmp & ~0x0FFFFFFFUL; - - return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END); + lmb_add(base, size); } -/** - * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory - * - * This routine may reserve memory regions in the kernel only - * if the system is supported and a dump was taken in last - * boot instance or if the hardware is supported and the - * scratch area needs to be setup. In other instances it returns - * without reserving anything. The memory in case of dump being - * active is freed when the dump is collected (by userland tools). - */ -static void __init phyp_dump_reserve_mem(void) +u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { - unsigned long base, size; - unsigned long variable_reserve_size; - - if (!phyp_dump_info->phyp_dump_configured) { - printk(KERN_ERR "Phyp-dump not supported on this hardware\n"); - return; - } - - if (!phyp_dump_info->phyp_dump_at_boot) { - printk(KERN_INFO "Phyp-dump disabled at boot time\n"); - return; - } - - variable_reserve_size = phyp_dump_calculate_reserve_size(); - - if (phyp_dump_info->phyp_dump_is_active) { - /* Reserve *everything* above RMR.Area freed by userland tools*/ - base = variable_reserve_size; - size = lmb_end_of_DRAM() - base; - - /* XXX crashed_ram_end is wrong, since it may be beyond - * the memory_limit, it will need to be adjusted. 
*/ - lmb_reserve(base, size); - - phyp_dump_info->init_reserve_start = base; - phyp_dump_info->init_reserve_size = size; - } else { - size = phyp_dump_info->cpu_state_size + - phyp_dump_info->hpte_region_size + - variable_reserve_size; - base = lmb_end_of_DRAM() - size; - lmb_reserve(base, size); - phyp_dump_info->init_reserve_start = base; - phyp_dump_info->init_reserve_size = size; - } + return lmb_alloc(size, align); } -#else -static inline void __init phyp_dump_reserve_mem(void) {} -#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ #ifdef CONFIG_EARLY_PRINTK /* MS this is Microblaze specifig function */ @@ -775,11 +98,6 @@ void __init early_init_devtree(void *params) /* Setup flat device-tree pointer */ initial_boot_params = params; -#ifdef CONFIG_PHYP_DUMP - /* scan tree to see if dump occured during last boot */ - of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); -#endif - /* Retrieve various informations from the /chosen node of the * device-tree, including the platform type, initrd location and * size, TCE reserve, and more ... @@ -799,33 +117,18 @@ void __init early_init_devtree(void *params) pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size()); - pr_debug("Scanning CPUs ...\n"); - - /* Retreive CPU related informations from the flat tree - * (altivec support, boot CPU ID, ...) - */ - of_scan_flat_dt(early_init_dt_scan_cpus, NULL); - pr_debug(" <- early_init_devtree()\n"); } -/** - * Indicates whether the root node has a given value in its - * compatible property. - */ -int machine_is_compatible(const char *compat) +#ifdef CONFIG_BLK_DEV_INITRD +void __init early_init_dt_setup_initrd_arch(unsigned long start, + unsigned long end) { - struct device_node *root; - int rc = 0; - - root = of_find_node_by_path("/"); - if (root) { - rc = of_device_is_compatible(root, compat); - of_node_put(root); - } - return rc; + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(end); + initrd_below_start_ok = 1; } -EXPORT_SYMBOL(machine_is_compatible); +#endif /******* * @@ -838,296 +141,6 @@ EXPORT_SYMBOL(machine_is_compatible); * *******/ -/** - * of_find_node_by_phandle - Find a node given a phandle - * @handle: phandle of the node to find - * - * Returns a node pointer with refcount incremented, use - * of_node_put() on it when done. - */ -struct device_node *of_find_node_by_phandle(phandle handle) -{ - struct device_node *np; - - read_lock(&devtree_lock); - for (np = allnodes; np != NULL; np = np->allnext) - if (np->linux_phandle == handle) - break; - of_node_get(np); - read_unlock(&devtree_lock); - return np; -} -EXPORT_SYMBOL(of_find_node_by_phandle); - -/** - * of_find_all_nodes - Get next node in global list - * @prev: Previous node or NULL to start iteration - * of_node_put() will be called on it - * - * Returns a node pointer with refcount incremented, use - * of_node_put() on it when done. - */ -struct device_node *of_find_all_nodes(struct device_node *prev) -{ - struct device_node *np; - - read_lock(&devtree_lock); - np = prev ? prev->allnext : allnodes; - for (; np != NULL; np = np->allnext) - if (of_node_get(np)) - break; - of_node_put(prev); - read_unlock(&devtree_lock); - return np; -} -EXPORT_SYMBOL(of_find_all_nodes); - -/** - * of_node_get - Increment refcount of a node - * @node: Node to inc refcount, NULL is supported to - * simplify writing of callers - * - * Returns node. 
- */ -struct device_node *of_node_get(struct device_node *node) -{ - if (node) - kref_get(&node->kref); - return node; -} -EXPORT_SYMBOL(of_node_get); - -static inline struct device_node *kref_to_device_node(struct kref *kref) -{ - return container_of(kref, struct device_node, kref); -} - -/** - * of_node_release - release a dynamically allocated node - * @kref: kref element of the node to be released - * - * In of_node_put() this function is passed to kref_put() - * as the destructor. - */ -static void of_node_release(struct kref *kref) -{ - struct device_node *node = kref_to_device_node(kref); - struct property *prop = node->properties; - - /* We should never be releasing nodes that haven't been detached. */ - if (!of_node_check_flag(node, OF_DETACHED)) { - printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n", - node->full_name); - dump_stack(); - kref_init(&node->kref); - return; - } - - if (!of_node_check_flag(node, OF_DYNAMIC)) - return; - - while (prop) { - struct property *next = prop->next; - kfree(prop->name); - kfree(prop->value); - kfree(prop); - prop = next; - - if (!prop) { - prop = node->deadprops; - node->deadprops = NULL; - } - } - kfree(node->full_name); - kfree(node->data); - kfree(node); -} - -/** - * of_node_put - Decrement refcount of a node - * @node: Node to dec refcount, NULL is supported to - * simplify writing of callers - * - */ -void of_node_put(struct device_node *node) -{ - if (node) - kref_put(&node->kref, of_node_release); -} -EXPORT_SYMBOL(of_node_put); - -/* - * Plug a device node into the tree and global list. - */ -void of_attach_node(struct device_node *np) -{ - unsigned long flags; - - write_lock_irqsave(&devtree_lock, flags); - np->sibling = np->parent->child; - np->allnext = allnodes; - np->parent->child = np; - allnodes = np; - write_unlock_irqrestore(&devtree_lock, flags); -} - -/* - * "Unplug" a node from the device tree. The caller must hold - * a reference to the node. The memory associated with the node - * is not freed until its refcount goes to zero. - */ -void of_detach_node(struct device_node *np) -{ - struct device_node *parent; - unsigned long flags; - - write_lock_irqsave(&devtree_lock, flags); - - parent = np->parent; - if (!parent) - goto out_unlock; - - if (allnodes == np) - allnodes = np->allnext; - else { - struct device_node *prev; - for (prev = allnodes; - prev->allnext != np; - prev = prev->allnext) - ; - prev->allnext = np->allnext; - } - - if (parent->child == np) - parent->child = np->sibling; - else { - struct device_node *prevsib; - for (prevsib = np->parent->child; - prevsib->sibling != np; - prevsib = prevsib->sibling) - ; - prevsib->sibling = np->sibling; - } - - of_node_set_flag(np, OF_DETACHED); - -out_unlock: - write_unlock_irqrestore(&devtree_lock, flags); -} - -/* - * Add a property to a node - */ -int prom_add_property(struct device_node *np, struct property *prop) -{ - struct property **next; - unsigned long flags; - - prop->next = NULL; - write_lock_irqsave(&devtree_lock, flags); - next = &np->properties; - while (*next) { - if (strcmp(prop->name, (*next)->name) == 0) { - /* duplicate ! don't insert it */ - write_unlock_irqrestore(&devtree_lock, flags); - return -1; - } - next = &(*next)->next; - } - *next = prop; - write_unlock_irqrestore(&devtree_lock, flags); - -#ifdef CONFIG_PROC_DEVICETREE - /* try to add to proc as well if it was initialized */ - if (np->pde) - proc_device_tree_add_prop(np->pde, prop); -#endif /* CONFIG_PROC_DEVICETREE */ - - return 0; -} - -/* - * Remove a property from a node. 
Note that we don't actually - * remove it, since we have given out who-knows-how-many pointers - * to the data using get-property. Instead we just move the property - * to the "dead properties" list, so it won't be found any more. - */ -int prom_remove_property(struct device_node *np, struct property *prop) -{ - struct property **next; - unsigned long flags; - int found = 0; - - write_lock_irqsave(&devtree_lock, flags); - next = &np->properties; - while (*next) { - if (*next == prop) { - /* found the node */ - *next = prop->next; - prop->next = np->deadprops; - np->deadprops = prop; - found = 1; - break; - } - next = &(*next)->next; - } - write_unlock_irqrestore(&devtree_lock, flags); - - if (!found) - return -ENODEV; - -#ifdef CONFIG_PROC_DEVICETREE - /* try to remove the proc node as well */ - if (np->pde) - proc_device_tree_remove_prop(np->pde, prop); -#endif /* CONFIG_PROC_DEVICETREE */ - - return 0; -} - -/* - * Update a property in a node. Note that we don't actually - * remove it, since we have given out who-knows-how-many pointers - * to the data using get-property. Instead we just move the property - * to the "dead properties" list, and add the new property to the - * property list - */ -int prom_update_property(struct device_node *np, - struct property *newprop, - struct property *oldprop) -{ - struct property **next; - unsigned long flags; - int found = 0; - - write_lock_irqsave(&devtree_lock, flags); - next = &np->properties; - while (*next) { - if (*next == oldprop) { - /* found the node */ - newprop->next = oldprop->next; - *next = newprop; - oldprop->next = np->deadprops; - np->deadprops = oldprop; - found = 1; - break; - } - next = &(*next)->next; - } - write_unlock_irqrestore(&devtree_lock, flags); - - if (!found) - return -ENODEV; - -#ifdef CONFIG_PROC_DEVICETREE - /* try to add to proc as well if it was initialized */ - if (np->pde) - proc_device_tree_update_prop(np->pde, newprop, oldprop); -#endif /* CONFIG_PROC_DEVICETREE */ - - return 0; -} - #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) static struct debugfs_blob_wrapper flat_dt_blob; diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c index ae0352ecd5a9..bf7e6c27e318 100644 --- a/arch/microblaze/kernel/prom_parse.c +++ b/arch/microblaze/kernel/prom_parse.c @@ -256,7 +256,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) if (ppdev == NULL) { struct pci_controller *host; host = pci_bus_to_host(pdev->bus); - ppnode = host ? host->arch_data : NULL; + ppnode = host ? host->dn : NULL; /* No node for host bridge ? give up */ if (ppnode == NULL) return -EINVAL; diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 4b3ac32754de..6d6349a145f9 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c @@ -78,26 +78,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) unsigned long copied; switch (request) { - case PTRACE_PEEKTEXT: /* read word at location addr. */ - case PTRACE_PEEKDATA: - pr_debug("PEEKTEXT/PEEKDATA at %08lX\n", addr); - copied = access_process_vm(child, addr, &val, sizeof(val), 0); - rval = -EIO; - if (copied != sizeof(val)) - break; - rval = put_user(val, (unsigned long *)data); - break; - - case PTRACE_POKETEXT: /* write the word at location addr. 
*/ - case PTRACE_POKEDATA: - pr_debug("POKETEXT/POKEDATA to %08lX\n", addr); - rval = 0; - if (access_process_vm(child, addr, &data, sizeof(data), 1) - == sizeof(data)) - break; - rval = -EIO; - break; - /* Read/write the word at location ADDR in the registers. */ case PTRACE_PEEKUSR: case PTRACE_POKEUSR: @@ -130,50 +110,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) if (rval == 0 && request == PTRACE_PEEKUSR) rval = put_user(val, (unsigned long *)data); break; - /* Continue and stop at next (return from) syscall */ - case PTRACE_SYSCALL: - pr_debug("PTRACE_SYSCALL\n"); - case PTRACE_SINGLESTEP: - pr_debug("PTRACE_SINGLESTEP\n"); - /* Restart after a signal. */ - case PTRACE_CONT: - pr_debug("PTRACE_CONT\n"); - rval = -EIO; - if (!valid_signal(data)) - break; - - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - - child->exit_code = data; - pr_debug("wakeup_process\n"); - wake_up_process(child); - rval = 0; - break; - - /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: - pr_debug("PTRACE_KILL\n"); - rval = 0; - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - break; - child->exit_code = SIGKILL; - wake_up_process(child); - break; - - case PTRACE_DETACH: /* detach a process that was attached. */ - pr_debug("PTRACE_DETACH\n"); - rval = ptrace_detach(child, data); - break; default: - /* rval = ptrace_request(child, request, addr, data); noMMU */ - rval = -EIO; + rval = ptrace_request(child, request, addr, data); } return rval; } diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c new file mode 100644 index 000000000000..a1721a33042e --- /dev/null +++ b/arch/microblaze/kernel/reset.c @@ -0,0 +1,140 @@ +/* + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/init.h> +#include <linux/of_platform.h> +#include <asm/prom.h> + +/* Trigger specific functions */ +#ifdef CONFIG_GPIOLIB + +#include <linux/of_gpio.h> + +static int handle; /* reset pin handle */ +static unsigned int reset_val; + +static int of_reset_gpio_handle(void) +{ + int ret; /* variable which stored handle reset gpio pin */ + struct device_node *root; /* root node */ + struct device_node *gpio; /* gpio node */ + struct of_gpio_chip *of_gc = NULL; + enum of_gpio_flags flags ; + const void *gpio_spec; + + /* find out root node */ + root = of_find_node_by_path("/"); + + /* give me handle for gpio node to be possible allocate pin */ + ret = of_parse_phandles_with_args(root, "hard-reset-gpios", + "#gpio-cells", 0, &gpio, &gpio_spec); + if (ret) { + pr_debug("%s: can't parse gpios property\n", __func__); + goto err0; + } + + of_gc = gpio->data; + if (!of_gc) { + pr_debug("%s: gpio controller %s isn't registered\n", + root->full_name, gpio->full_name); + ret = -ENODEV; + goto err1; + } + + ret = of_gc->xlate(of_gc, root, gpio_spec, &flags); + if (ret < 0) + goto err1; + + ret += of_gc->gc.base; +err1: + of_node_put(gpio); +err0: + pr_debug("%s exited with status %d\n", __func__, ret); + return ret; +} + +void of_platform_reset_gpio_probe(void) +{ + int ret; + handle = of_reset_gpio_handle(); + + if (!gpio_is_valid(handle)) { + printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n", + handle, "reset"); + } + + ret = gpio_request(handle, "reset"); + if (ret < 0) { + printk(KERN_INFO "GPIO pin is already allocated\n"); + return; + } + + /* get current setup value */ + reset_val = gpio_get_value(handle); + /* FIXME maybe worth to perform any action */ + pr_debug("Reset: Gpio output state: 0x%x\n", reset_val); + + /* Setup GPIO as output */ + ret = gpio_direction_output(handle, 0); + if (ret < 0) + goto err; + + /* Setup output direction */ + gpio_set_value(handle, 0); + + printk(KERN_INFO "RESET: Registered gpio device: %d, current val: %d\n", + handle, reset_val); + return; +err: + gpio_free(handle); + return; +} + + +static void gpio_system_reset(void) +{ + gpio_set_value(handle, 1 - reset_val); +} +#else +#define gpio_system_reset() do {} while (0) +void of_platform_reset_gpio_probe(void) +{ + return; +} +#endif + +void machine_restart(char *cmd) +{ + printk(KERN_NOTICE "Machine restart...\n"); + gpio_system_reset(); + dump_stack(); + while (1) + ; +} + +void machine_shutdown(void) +{ + printk(KERN_NOTICE "Machine shutdown...\n"); + while (1) + ; +} + +void machine_halt(void) +{ + printk(KERN_NOTICE "Machine halt...\n"); + while (1) + ; +} + +void machine_power_off(void) +{ + printk(KERN_NOTICE "Machine power off...\n"); + while (1) + ; +} diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 8c1e0f4dcf18..f974ec7aa357 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -22,7 +22,10 @@ #include <linux/io.h> #include <linux/bug.h> #include <linux/param.h> +#include <linux/pci.h> #include <linux/cache.h> +#include <linux/of_platform.h> +#include <linux/dma-mapping.h> #include <asm/cacheflush.h> #include <asm/entry.h> #include <asm/cpuinfo.h> @@ -52,16 +55,12 @@ void __init setup_arch(char **cmdline_p) /* irq_early_init(); */ setup_cpuinfo(); - __invalidate_icache_all(); - __enable_icache(); - - __invalidate_dcache_all(); - __enable_dcache(); - - panic_timeout = 120; + microblaze_cache_init(); setup_memory(); + xilinx_pci_init(); + #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 
printk(KERN_NOTICE "Self modified code enable\n"); #endif @@ -131,6 +130,8 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); #endif + lockdep_init(); + /* initialize device tree for usage in early_printk */ early_init_devtree((void *)_fdt_start); @@ -187,31 +188,36 @@ static int microblaze_debugfs_init(void) arch_initcall(microblaze_debugfs_init); #endif -void machine_restart(char *cmd) +static int dflt_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) { - printk(KERN_NOTICE "Machine restart...\n"); - dump_stack(); - while (1) - ; -} + struct device *dev = data; -void machine_shutdown(void) -{ - printk(KERN_NOTICE "Machine shutdown...\n"); - while (1) - ; -} + /* We are only intereted in device addition */ + if (action != BUS_NOTIFY_ADD_DEVICE) + return 0; -void machine_halt(void) -{ - printk(KERN_NOTICE "Machine halt...\n"); - while (1) - ; + set_dma_ops(dev, &dma_direct_ops); + + return NOTIFY_DONE; } -void machine_power_off(void) +static struct notifier_block dflt_plat_bus_notifier = { + .notifier_call = dflt_bus_notify, + .priority = INT_MAX, +}; + +static struct notifier_block dflt_of_bus_notifier = { + .notifier_call = dflt_bus_notify, + .priority = INT_MAX, +}; + +static int __init setup_bus_notifier(void) { - printk(KERN_NOTICE "Machine power off...\n"); - while (1) - ; + bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier); + bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier); + + return 0; } + +arch_initcall(setup_bus_notifier); diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 1c80e4fc40ce..d8d3bb396cd6 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -44,7 +44,6 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall); - asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) @@ -176,6 +175,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, struct rt_sigframe __user *frame; int err = 0; int signal; + unsigned long address = 0; +#ifdef CONFIG_MMU + pmd_t *pmdp; + pte_t *ptep; +#endif frame = get_sigframe(ka, regs, sizeof(*frame)); @@ -216,8 +220,29 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, Negative 8 offset because return is rtsd r15, 8 */ regs->r15 = ((unsigned long)frame->tramp)-8; - __invalidate_cache_sigtramp((unsigned long)frame->tramp); - + address = ((unsigned long)frame->tramp); +#ifdef CONFIG_MMU + pmdp = pmd_offset(pud_offset( + pgd_offset(current->mm, address), + address), address); + + preempt_disable(); + ptep = pte_offset_map(pmdp, address); + if (pte_present(*ptep)) { + address = (unsigned long) page_address(pte_page(*ptep)); + /* MS: I need add offset in page */ + address += ((unsigned long)frame->tramp) & ~PAGE_MASK; + /* MS address is virtual */ + address = virt_to_phys(address); + invalidate_icache_range(address, address + 8); + flush_dcache_range(address, address + 8); + } + pte_unmap(ptep); + preempt_enable(); +#else + flush_icache_range(address, address + 8); + flush_dcache_range(address, address + 8); +#endif if (err) goto give_sigsegv; @@ -233,6 +258,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, set_fs(USER_DS); + /* the tracer may want to single-step inside the handler */ + if (test_thread_flag(TIF_SINGLESTEP)) + ptrace_notify(SIGTRAP); + #ifdef DEBUG_SIG printk(KERN_INFO "SIG deliver 
(%s:%d): sp=%p pc=%08lx\n", current->comm, current->pid, frame, regs->pc); diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c new file mode 100644 index 000000000000..123692f22647 --- /dev/null +++ b/arch/microblaze/kernel/stacktrace.c @@ -0,0 +1,65 @@ +/* + * Stack trace support for Microblaze. + * + * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> + * Copyright (C) 2009 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/sched.h> +#include <linux/stacktrace.h> +#include <linux/thread_info.h> +#include <linux/ptrace.h> +#include <linux/module.h> + +/* FIXME initial support */ +void save_stack_trace(struct stack_trace *trace) +{ + unsigned long *sp; + unsigned long addr; + asm("addik %0, r1, 0" : "=r" (sp)); + + while (!kstack_end(sp)) { + addr = *sp++; + if (__kernel_text_address(addr)) { + if (trace->skip > 0) + trace->skip--; + else + trace->entries[trace->nr_entries++] = addr; + + if (trace->nr_entries >= trace->max_entries) + break; + } + } +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + unsigned int *sp; + unsigned long addr; + + struct thread_info *ti = task_thread_info(tsk); + + if (tsk == current) + asm("addik %0, r1, 0" : "=r" (sp)); + else + sp = (unsigned int *)ti->cpu_context.r1; + + while (!kstack_end(sp)) { + addr = *sp++; + if (__kernel_text_address(addr)) { + if (trace->skip > 0) + trace->skip--; + else + trace->entries[trace->nr_entries++] = addr; + + if (trace->nr_entries >= trace->max_entries) + break; + } + } +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index 07cabed4b947..9f3c205fb75b 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c @@ -62,46 +62,14 @@ out: return error; } -asmlinkage long -sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - struct file *file = NULL; - int ret = -EBADF; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) { - printk(KERN_INFO "no fd in mmap\r\n"); - goto out; - } - } - - down_write(¤t->mm->mmap_sem); - ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); -out: - return ret; -} - asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t pgoff) { - int err = -EINVAL; - - if (pgoff & ~PAGE_MASK) { - printk(KERN_INFO "no pagemask in mmap\r\n"); - goto out; - } + if (pgoff & ~PAGE_MASK) + return -EINVAL; - err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); -out: - return err; + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); } /* diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index ecec19155135..03376dc814c9 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -183,7 +183,7 @@ ENTRY(sys_call_table) .long sys_rt_sigpending .long sys_rt_sigtimedwait .long sys_rt_sigqueueinfo - .long sys_rt_sigsuspend_wrapper + .long sys_rt_sigsuspend .long sys_pread64 /* 180 */ .long sys_pwrite64 .long sys_chown @@ -196,7 +196,7 @@ 
ENTRY(sys_call_table) .long sys_ni_syscall /* reserved for streams2 */ .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap2 /* mmap2 */ + .long sys_mmap_pgoff /* mmap2 */ .long sys_truncate64 .long sys_ftruncate64 .long sys_stat64 /* 195 */ @@ -303,7 +303,7 @@ ENTRY(sys_call_table) .long sys_mkdirat .long sys_mknodat .long sys_fchownat - .long sys_ni_syscall + .long sys_futimesat .long sys_fstatat64 /* 300 */ .long sys_unlinkat .long sys_renameat @@ -366,8 +366,9 @@ ENTRY(sys_call_table) .long sys_shutdown .long sys_sendmsg /* 360 */ .long sys_recvmsg - .long sys_ni_syscall + .long sys_accept4 .long sys_ni_syscall .long sys_ni_syscall .long sys_rt_tgsigqueueinfo /* 365 */ .long sys_perf_event_open + .long sys_recvmmsg diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 5499deae7fa6..ed61b2f17719 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -183,6 +183,31 @@ static cycle_t microblaze_read(struct clocksource *cs) return (cycle_t) (in_be32(TIMER_BASE + TCR1)); } +static struct timecounter microblaze_tc = { + .cc = NULL, +}; + +static cycle_t microblaze_cc_read(const struct cyclecounter *cc) +{ + return microblaze_read(NULL); +} + +static struct cyclecounter microblaze_cc = { + .read = microblaze_cc_read, + .mask = CLOCKSOURCE_MASK(32), + .shift = 24, +}; + +int __init init_microblaze_timecounter(void) +{ + microblaze_cc.mult = div_sc(cpuinfo.cpu_clock_freq, NSEC_PER_SEC, + microblaze_cc.shift); + + timecounter_init(µblaze_tc, µblaze_cc, sched_clock()); + + return 0; +} + static struct clocksource clocksource_microblaze = { .name = "microblaze_clocksource", .rating = 300, @@ -204,6 +229,9 @@ static int __init microblaze_clocksource_init(void) out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT); /* start timer1 - up counting without interrupt */ out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT); + + /* register timecounter - for ftrace support */ + init_microblaze_timecounter(); return 0; } diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index e704188d7855..5ef619aad634 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -26,11 +26,12 @@ SECTIONS { _stext = . ; *(.text .text.*) *(.fixup) - EXIT_TEXT - EXIT_CALL + EXIT_TEXT + EXIT_CALL SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT . = ALIGN (4) ; _etext = . ; } @@ -86,6 +87,7 @@ SECTIONS { _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ; } + . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) |
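
The timer.c hunk above registers a cyclecounter/timecounter pair so the kernel can turn raw timer ticks into nanoseconds (used here for ftrace timestamps). As a rough, standalone sketch of that cycles-to-nanoseconds scaling — ns = (cycles * mult) >> shift — the example below uses a hypothetical 100 MHz clock, an example shift, and a generic mult derivation; none of these values or identifiers are taken from the MicroBlaze patch itself.

/*
 * Standalone illustration (ordinary user-space C, not kernel code) of the
 * fixed-point scaling a cyclecounter/timecounter relies on:
 *     ns = (cycles * mult) >> shift
 * EXAMPLE_FREQ_HZ, EXAMPLE_SHIFT and the mult formula are assumptions for
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FREQ_HZ   100000000ULL   /* hypothetical 100 MHz timer */
#define NSEC_PER_SEC      1000000000ULL
#define EXAMPLE_SHIFT     24

int main(void)
{
	/* generic derivation: mult = (NSEC_PER_SEC << shift) / freq, rounded */
	uint64_t mult = ((NSEC_PER_SEC << EXAMPLE_SHIFT) + EXAMPLE_FREQ_HZ / 2)
			/ EXAMPLE_FREQ_HZ;

	uint64_t cycles = 250000000ULL;      /* 2.5 s worth of ticks at 100 MHz */
	uint64_t ns = (cycles * mult) >> EXAMPLE_SHIFT;

	printf("mult=%llu shift=%d: %llu cycles -> %llu ns\n",
	       (unsigned long long)mult, EXAMPLE_SHIFT,
	       (unsigned long long)cycles, (unsigned long long)ns);
	return 0;
}

Built and run as-is, this prints 2500000000 ns for 250000000 ticks of the assumed 100 MHz clock, i.e. the expected 2.5 seconds.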