Diffstat (limited to 'arch/sh/include/asm')
75 files changed, 1390 insertions, 1103 deletions
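The largest conceptual change in this series is the consolidation of atomic_cmpxchg()/atomic_add_unless() out of the per-variant headers (atomic-grb.h, atomic-llsc.h) and into atomic.h as a single cmpxchg()-based loop. The sketch below mirrors that loop as added to arch/sh/include/asm/atomic.h in the hunks that follow; it is only an illustrative, self-contained model, with GCC's __sync_val_compare_and_swap() standing in for the kernel's cmpxchg() so it builds outside the kernel (that substitution, and the toy atomic_t, are assumptions of the example, not part of the patch).

/*
 * Illustrative model of the generic cmpxchg()-based atomic_add_unless()
 * loop consolidated into atomic.h by this patch.
 */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static inline int atomic_read(const atomic_t *v)
{
	return v->counter;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	/* stand-in for the kernel's cmpxchg(&v->counter, old, new) */
	return __sync_val_compare_and_swap(&v->counter, old, new);
}

/* Add @a to @v unless @v == @u; returns non-zero if the add happened. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (c == u)
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (old == c)
			break;
		c = old;	/* lost a race; retry with the observed value */
	}

	return c != u;
}

int main(void)
{
	atomic_t v = { .counter = 0 };

	printf("%d %d\n", atomic_add_unless(&v, 1, 0), v.counter);	/* 0 0 */
	v.counter = 5;
	printf("%d %d\n", atomic_add_unless(&v, 1, 0), v.counter);	/* 1 6 */
	return 0;
}

The same loop previously existed open-coded (and in gUSA/LL-SC assembly variants) in the per-CPU atomic headers; the diff below removes those copies and layers everything on atomic_cmpxchg() instead.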
diff --git a/arch/sh/include/asm/.gitignore b/arch/sh/include/asm/.gitignore deleted file mode 100644 index 378db779fb6c..000000000000 --- a/arch/sh/include/asm/.gitignore +++ /dev/null @@ -1 +0,0 @@ -machtypes.h diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index e121c30f797d..46cb93477bcb 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -1,6 +1,8 @@ include include/asm-generic/Kbuild.asm -header-y += cachectl.h cpu-features.h +header-y += cachectl.h +header-y += cpu-features.h +header-y += hw_breakpoint.h unifdef-y += unistd_32.h unifdef-y += unistd_64.h diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 80d40813e057..446b3831c214 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -28,10 +28,7 @@ /* Returns the privileged segment base of a given address */ #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) -/* Returns the physical address of a PnSEG (n=1,2) address */ -#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) - -#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) +#ifdef CONFIG_29BIT /* * Map an address to a certain privileged segment */ @@ -43,7 +40,15 @@ ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG)) #define P4SEGADDR(a) \ ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG)) -#endif /* 29BIT || PMB_FIXED */ +#else +/* + * These will never work in 32-bit, don't even bother. + */ +#define P1SEGADDR(a) __futile_remapping_attempt +#define P2SEGADDR(a) __futile_remapping_attempt +#define P3SEGADDR(a) __futile_remapping_attempt +#define P4SEGADDR(a) __futile_remapping_attempt +#endif #endif /* P1SEG */ /* Check if an address can be reached in 29 bits */ diff --git a/arch/sh/include/asm/alignment.h b/arch/sh/include/asm/alignment.h new file mode 100644 index 000000000000..b12efecf5294 --- /dev/null +++ b/arch/sh/include/asm/alignment.h @@ -0,0 +1,21 @@ +#ifndef __ASM_SH_ALIGNMENT_H +#define __ASM_SH_ALIGNMENT_H + +#include <linux/types.h> + +extern void inc_unaligned_byte_access(void); +extern void inc_unaligned_word_access(void); +extern void inc_unaligned_dword_access(void); +extern void inc_unaligned_multi_access(void); +extern void inc_unaligned_user_access(void); +extern void inc_unaligned_kernel_access(void); + +#define UM_WARN (1 << 0) +#define UM_FIXUP (1 << 1) +#define UM_SIGNAL (1 << 2) + +extern unsigned int unaligned_user_action(void); + +extern void unaligned_fixups_notify(struct task_struct *, insn_size_t, struct pt_regs *); + +#endif /* __ASM_SH_ALIGNMENT_H */ diff --git a/arch/sh/include/asm/asm-offsets.h b/arch/sh/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/sh/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h index 4c5b7dbfcedb..a273c88578fc 100644 --- a/arch/sh/include/asm/atomic-grb.h +++ b/arch/sh/include/asm/atomic-grb.h @@ -120,50 +120,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) : "memory" , "r0", "r1"); } -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -{ - int ret; - - __asm__ __volatile__ ( - " .align 2 \n\t" - " mova 1f, r0 \n\t" - " nop \n\t" - " mov r15, r1 \n\t" - " mov #-8, r15 \n\t" - " mov.l @%1, %0 \n\t" - " cmp/eq %2, %0 \n\t" - " bf 1f \n\t" - " mov.l %3, @%1 \n\t" - "1: mov r1, r15 \n\t" - : "=&r" (ret) - : "r" (v), "r" (old), "r" (new) - : "memory" , "r0", "r1" , "t"); - - return ret; -} - 
-static inline int atomic_add_unless(atomic_t *v, int a, int u) -{ - int ret; - unsigned long tmp; - - __asm__ __volatile__ ( - " .align 2 \n\t" - " mova 1f, r0 \n\t" - " nop \n\t" - " mov r15, r1 \n\t" - " mov #-12, r15 \n\t" - " mov.l @%2, %1 \n\t" - " mov %1, %0 \n\t" - " cmp/eq %4, %0 \n\t" - " bt/s 1f \n\t" - " add %3, %1 \n\t" - " mov.l %1, @%2 \n\t" - "1: mov r1, r15 \n\t" - : "=&r" (ret), "=&r" (tmp) - : "r" (v), "r" (a), "r" (u) - : "memory" , "r0", "r1" , "t"); - - return ret != u; -} #endif /* __ASM_SH_ATOMIC_GRB_H */ diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h index b040e1e08610..4b00b78e3f4f 100644 --- a/arch/sh/include/asm/atomic-llsc.h +++ b/arch/sh/include/asm/atomic-llsc.h @@ -104,31 +104,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) : "t"); } -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) - -/** - * atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. - */ -static inline int atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - - return c != (u); -} - #endif /* __ASM_SH_ATOMIC_LLSC_H */ diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index e8e78137c6f5..275a448ae8c2 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -25,64 +25,48 @@ #endif #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_inc_return(v) atomic_add_return(1, (v)) +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) +#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#define atomic_dec_return(v) atomic_sub_return(1,(v)) -#define atomic_inc_return(v) atomic_add_return(1,(v)) +#define atomic_inc(v) atomic_add(1, (v)) +#define atomic_dec(v) atomic_sub(1, (v)) -/* - * atomic_inc_and_test - increment and test +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) + +/** + * atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) - -#define atomic_inc(v) atomic_add(1,(v)) -#define atomic_dec(v) atomic_sub(1,(v)) - -#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A) -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -{ - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (likely(ret == old)) - v->counter = new; - local_irq_restore(flags); - - return ret; -} - static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (ret != u) - v->counter += a; - local_irq_restore(flags); - - return ret != u; + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + + return c != (u); } -#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */ - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -/* Atomic operations are already serializing on SH */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() #include <asm-generic/atomic-long.h> #include <asm-generic/atomic64.h> diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index ebe595b7ab1f..98511e4d28cb 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -26,8 +26,8 @@ /* * clear_bit() doesn't provide any barrier for the compiler. 
*/ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() #ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 46260fcbdf4b..02a19a1c033a 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -14,11 +14,15 @@ #include <asm/processor.h> +extern void select_idle_routine(void); + static void __init check_bugs(void) { extern unsigned long loops_per_jiffy; char *p = &init_utsname()->machine[2]; /* "sh" */ + select_idle_routine(); + current_cpu_data.loops_per_jiffy = loops_per_jiffy; switch (current_cpu_data.family) { diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index c29918f3c819..1f4e562c5e8c 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -42,6 +42,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_icache_page(struct vm_area_struct *vma, @@ -62,6 +63,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma, if (boot_cpu_data.dcache.n_aliases && PageAnon(page)) __flush_anon_page(page, vmaddr); } +static inline void flush_kernel_vmap_range(void *addr, int size) +{ + __flush_wback_region(addr, size); +} +static inline void invalidate_kernel_vmap_range(void *addr, int size) +{ + __flush_invalidate_region(addr, size); +} #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE static inline void flush_kernel_dcache_page(struct page *page) @@ -77,8 +86,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); -#define flush_cache_vmap(start, end) flush_cache_all() -#define flush_cache_vunmap(start, end) flush_cache_all() +#define flush_cache_vmap(start, end) local_flush_cache_all(NULL) +#define flush_cache_vunmap(start, end) local_flush_cache_all(NULL) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h index 9fe7d7f8af40..11da4c5beb68 100644 --- a/arch/sh/include/asm/clock.h +++ b/arch/sh/include/asm/clock.h @@ -146,8 +146,17 @@ int sh_clk_mstp32_register(struct clk *clks, int nr); .flags = _flags, \ } +struct clk_div4_table { + struct clk_div_mult_table *div_mult_table; + void (*kick)(struct clk *clk); +}; + int sh_clk_div4_register(struct clk *clks, int nr, - struct clk_div_mult_table *table); + struct clk_div4_table *table); +int sh_clk_div4_enable_register(struct clk *clks, int nr, + struct clk_div4_table *table); +int sh_clk_div4_reparent_register(struct clk *clks, int nr, + struct clk_div4_table *table); #define SH_CLK_DIV6(_name, _parent, _reg, _flags) \ { \ diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h index e2681abe764f..4676bf57693a 100644 --- a/arch/sh/include/asm/cmpxchg-grb.h +++ b/arch/sh/include/asm/cmpxchg-grb.h @@ -57,11 +57,10 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, " mov.l @%1, %0 \n\t" /* load old value */ " cmp/eq %0, %2 \n\t" " bf 1f 
\n\t" /* if not equal */ - " mov.l %2, @%1 \n\t" /* store new value */ + " mov.l %3, @%1 \n\t" /* store new value */ "1: mov r1, r15 \n\t" /* LOGOUT */ - : "=&r" (retval), - "+r" (m) - : "r" (new) + : "=&r" (retval) + : "r" (m), "r" (old), "r" (new) : "memory" , "r0", "r1", "t"); return retval; diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 69d56dd4c968..bea3337a426a 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -1,219 +1,106 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -#include <linux/mm.h> -#include <linux/scatterlist.h> -#include <linux/dma-debug.h> -#include <asm/cacheflush.h> -#include <asm/io.h> +extern struct dma_map_ops *dma_ops; +extern void no_iommu_init(void); + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + return dma_ops; +} + #include <asm-generic/dma-coherent.h> +#include <asm-generic/dma-mapping-common.h> + +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); -extern struct bus_type pci_bus_type; + if (ops->dma_supported) + return ops->dma_supported(dev, mask); -#define dma_supported(dev, mask) (1) + return 1; +} static inline int dma_set_mask(struct device *dev, u64 mask) { + struct dma_map_ops *ops = get_dma_ops(dev); + if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, mask); *dev->dma_mask = mask; return 0; } -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) - -static inline dma_addr_t dma_map_single(struct device *dev, - void *ptr, size_t size, - enum dma_data_direction dir) -{ - dma_addr_t addr = virt_to_phys(ptr); - -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return addr; -#endif - dma_cache_sync(dev, ptr, size, dir); - debug_dma_map_page(dev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, addr, true); - - return addr; -} - -static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - debug_dma_unmap_page(dev, addr, size, dir, true); -} - -static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); -#endif - sg[i].dma_address = sg_phys(&sg[i]); - sg[i].dma_length = sg[i].length; - } - - debug_dma_map_sg(dev, sg, nents, i, dir); - - return nents; -} - -static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - debug_dma_unmap_sg(dev, sg, nents, dir); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - return dma_map_single(dev, page_address(page) + offset, size, dir); -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum 
dma_data_direction dir) -{ - dma_unmap_single(dev, dma_address, size, dir); -} - -static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; +#ifdef CONFIG_DMA_COHERENT +#define dma_is_consistent(d, h) (1) +#else +#define dma_is_consistent(d, h) (0) #endif - dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir); -} -static inline void dma_sync_single_range(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) +static inline int dma_get_cache_alignment(void) { -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; -#endif - dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir); + /* + * Each processor family will define its own L1_CACHE_SHIFT, + * L1_CACHE_BYTES wraps to this, so this is always safe. + */ + return L1_CACHE_BYTES; } -static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - int i; + struct dma_map_ops *ops = get_dma_ops(dev); - for (i = 0; i < nelems; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); -#endif - sg[i].dma_address = sg_phys(&sg[i]); - sg[i].dma_length = sg[i].length; - } -} + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction dir) -{ - __dma_sync_single(dev, dma_handle, size, dir); - debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, - size_t size, - enum dma_data_direction dir) -{ - __dma_sync_single(dev, dma_handle, size, dir); - debug_dma_sync_single_for_device(dev, dma_handle, size, dir); + return dma_addr == 0; } -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) { - dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); - debug_dma_sync_single_range_for_cpu(dev, dma_handle, - offset, size, direction); -} + struct dma_map_ops *ops = get_dma_ops(dev); + void *memory; -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_device(dev, dma_handle+offset, size, direction); - debug_dma_sync_single_range_for_device(dev, dma_handle, - offset, size, direction); -} + if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) + return memory; + if (!ops->alloc_coherent) + return NULL; + memory = ops->alloc_coherent(dev, size, dma_handle, gfp); + debug_dma_alloc_coherent(dev, size, *dma_handle, memory); -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - __dma_sync_sg(dev, sg, nelems, dir); - debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); + return memory; } -static inline void dma_sync_sg_for_device(struct device *dev, - struct 
scatterlist *sg, int nelems, - enum dma_data_direction dir) +static inline void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { - __dma_sync_sg(dev, sg, nelems, dir); - debug_dma_sync_sg_for_device(dev, sg, nelems, dir); -} + struct dma_map_ops *ops = get_dma_ops(dev); -static inline int dma_get_cache_alignment(void) -{ - /* - * Each processor family will define its own L1_CACHE_SHIFT, - * L1_CACHE_BYTES wraps to this, so this is always safe. - */ - return L1_CACHE_BYTES; -} + if (dma_release_from_coherent(dev, get_order(size), vaddr)) + return; -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == 0; + debug_dma_free_coherent(dev, size, vaddr, dma_handle); + if (ops->free_coherent) + ops->free_coherent(dev, size, vaddr, dma_handle); } -#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY - -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); - -extern void -dma_release_declared_memory(struct device *dev); - -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); +/* arch/sh/mm/consistent.c */ +extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t flag); +extern void dma_generic_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); #endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h new file mode 100644 index 000000000000..51cd78feacff --- /dev/null +++ b/arch/sh/include/asm/dma-register.h @@ -0,0 +1,51 @@ +/* + * Common header for the legacy SH DMA driver and the new dmaengine driver + * + * extracted from arch/sh/include/asm/dma-sh.h: + * + * Copyright (C) 2000 Takashi YOSHII + * Copyright (C) 2003 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef DMA_REGISTER_H +#define DMA_REGISTER_H + +/* DMA register */ +#define SAR 0x00 +#define DAR 0x04 +#define TCR 0x08 +#define CHCR 0x0C +#define DMAOR 0x40 + +/* DMAOR definitions */ +#define DMAOR_AE 0x00000004 +#define DMAOR_NMIF 0x00000002 +#define DMAOR_DME 0x00000001 + +/* Definitions for the SuperH DMAC */ +#define REQ_L 0x00000000 +#define REQ_E 0x00080000 +#define RACK_H 0x00000000 +#define RACK_L 0x00040000 +#define ACK_R 0x00000000 +#define ACK_W 0x00020000 +#define ACK_H 0x00000000 +#define ACK_L 0x00010000 +#define DM_INC 0x00004000 +#define DM_DEC 0x00008000 +#define DM_FIX 0x0000c000 +#define SM_INC 0x00001000 +#define SM_DEC 0x00002000 +#define SM_FIX 0x00003000 +#define RS_IN 0x00000200 +#define RS_OUT 0x00000300 +#define TS_BLK 0x00000040 +#define TM_BUR 0x00000020 +#define CHCR_DE 0x00000001 +#define CHCR_TE 0x00000002 +#define CHCR_IE 0x00000004 + +#endif diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index 78eed3e0bdf5..f3acb8e34c6b 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -11,7 +11,8 @@ #ifndef __DMA_SH_H #define __DMA_SH_H -#include <asm/dma.h> +#include <asm/dma-register.h> +#include <cpu/dma-register.h> #include <cpu/dma.h> /* DMAOR contorl: The DMAOR access size is different by CPU.*/ @@ -20,14 +21,14 @@ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) #define dmaor_read_reg(n) \ - (n ? 
ctrl_inw(SH_DMAC_BASE1 + DMAOR) \ - : ctrl_inw(SH_DMAC_BASE0 + DMAOR)) + (n ? __raw_readw(SH_DMAC_BASE1 + DMAOR) \ + : __raw_readw(SH_DMAC_BASE0 + DMAOR)) #define dmaor_write_reg(n, data) \ - (n ? ctrl_outw(data, SH_DMAC_BASE1 + DMAOR) \ - : ctrl_outw(data, SH_DMAC_BASE0 + DMAOR)) + (n ? __raw_writew(data, SH_DMAC_BASE1 + DMAOR) \ + : __raw_writew(data, SH_DMAC_BASE0 + DMAOR)) #else /* Other CPU */ -#define dmaor_read_reg(n) ctrl_inw(SH_DMAC_BASE0 + DMAOR) -#define dmaor_write_reg(n, data) ctrl_outw(data, SH_DMAC_BASE0 + DMAOR) +#define dmaor_read_reg(n) __raw_readw(SH_DMAC_BASE0 + DMAOR) +#define dmaor_write_reg(n, data) __raw_writew(data, SH_DMAC_BASE0 + DMAOR) #endif static int dmte_irq_map[] __maybe_unused = { @@ -53,37 +54,11 @@ static int dmte_irq_map[] __maybe_unused = { #endif }; -/* Definitions for the SuperH DMAC */ -#define REQ_L 0x00000000 -#define REQ_E 0x00080000 -#define RACK_H 0x00000000 -#define RACK_L 0x00040000 -#define ACK_R 0x00000000 -#define ACK_W 0x00020000 -#define ACK_H 0x00000000 -#define ACK_L 0x00010000 -#define DM_INC 0x00004000 -#define DM_DEC 0x00008000 -#define SM_INC 0x00001000 -#define SM_DEC 0x00002000 -#define RS_IN 0x00000200 -#define RS_OUT 0x00000300 -#define TS_BLK 0x00000040 -#define TM_BUR 0x00000020 -#define CHCR_DE 0x00000001 -#define CHCR_TE 0x00000002 -#define CHCR_IE 0x00000004 - -/* DMAOR definitions */ -#define DMAOR_AE 0x00000004 -#define DMAOR_NMIF 0x00000002 -#define DMAOR_DME 0x00000001 - /* * Define the default configuration for dual address memory-memory transfer. * The 0x400 value represents auto-request, external->external. */ -#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32) +#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT)) /* DMA base address */ static u32 dma_base_addr[] __maybe_unused = { @@ -109,24 +84,4 @@ static u32 dma_base_addr[] __maybe_unused = { #endif }; -/* DMA register */ -#define SAR 0x00 -#define DAR 0x04 -#define TCR 0x08 -#define CHCR 0x0C -#define DMAOR 0x40 - -/* - * for dma engine - * - * SuperH DMA mode - */ -#define SHDMA_MIX_IRQ (1 << 1) -#define SHDMA_DMAOR1 (1 << 2) -#define SHDMA_DMAE1 (1 << 3) - -struct sh_dmae_pdata { - unsigned int mode; -}; - #endif /* __DMA_SH_H */ diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h index 04ad0e1e637e..07373a074090 100644 --- a/arch/sh/include/asm/dma.h +++ b/arch/sh/include/asm/dma.h @@ -19,9 +19,11 @@ #include <asm-generic/dma.h> #ifdef CONFIG_NR_DMA_CHANNELS -# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS) +# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS) +#elif defined(CONFIG_NR_ONCHIP_DMA_CHANNELS) +# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS) #else -# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS) +# define MAX_DMA_CHANNELS 0 #endif /* diff --git a/arch/sh/include/asm/dmaengine.h b/arch/sh/include/asm/dmaengine.h new file mode 100644 index 000000000000..bf2f30cf0a27 --- /dev/null +++ b/arch/sh/include/asm/dmaengine.h @@ -0,0 +1,93 @@ +/* + * Header for the new SH dmaengine driver + * + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASM_DMAENGINE_H +#define ASM_DMAENGINE_H + +#include <linux/dmaengine.h> +#include <linux/list.h> + +#include <asm/dma-register.h> + +#define SH_DMAC_MAX_CHANNELS 6 + +enum sh_dmae_slave_chan_id { + SHDMA_SLAVE_SCIF0_TX, + SHDMA_SLAVE_SCIF0_RX, + SHDMA_SLAVE_SCIF1_TX, + SHDMA_SLAVE_SCIF1_RX, + SHDMA_SLAVE_SCIF2_TX, + SHDMA_SLAVE_SCIF2_RX, + SHDMA_SLAVE_SCIF3_TX, + SHDMA_SLAVE_SCIF3_RX, + SHDMA_SLAVE_SCIF4_TX, + SHDMA_SLAVE_SCIF4_RX, + SHDMA_SLAVE_SCIF5_TX, + SHDMA_SLAVE_SCIF5_RX, + SHDMA_SLAVE_SIUA_TX, + SHDMA_SLAVE_SIUA_RX, + SHDMA_SLAVE_SIUB_TX, + SHDMA_SLAVE_SIUB_RX, + SHDMA_SLAVE_NUMBER, /* Must stay last */ +}; + +struct sh_dmae_slave_config { + enum sh_dmae_slave_chan_id slave_id; + dma_addr_t addr; + u32 chcr; + char mid_rid; +}; + +struct sh_dmae_channel { + unsigned int offset; + unsigned int dmars; + unsigned int dmars_bit; +}; + +struct sh_dmae_pdata { + struct sh_dmae_slave_config *slave; + int slave_num; + struct sh_dmae_channel *channel; + int channel_num; + unsigned int ts_low_shift; + unsigned int ts_low_mask; + unsigned int ts_high_shift; + unsigned int ts_high_mask; + unsigned int *ts_shift; + int ts_shift_num; + u16 dmaor_init; +}; + +struct device; + +/* Used by slave DMA clients to request DMA to/from a specific peripheral */ +struct sh_dmae_slave { + enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ + struct device *dma_dev; /* Set by the platform */ + struct sh_dmae_slave_config *config; /* Set by the driver */ +}; + +struct sh_dmae_regs { + u32 sar; /* SAR / source address */ + u32 dar; /* DAR / destination address */ + u32 tcr; /* TCR / transfer count */ +}; + +struct sh_desc { + struct sh_dmae_regs hw; + struct list_head node; + struct dma_async_tx_descriptor async_tx; + enum dma_data_direction direction; + dma_cookie_t cookie; + size_t partial; + int chunks; + int mark; +}; + +#endif diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index ced6795891a6..d62abd1d0c05 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -194,6 +194,12 @@ #define DWARF_ARCH_RA_REG 17 #ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/bug.h> +#include <linux/list.h> +#include <linux/module.h> + /* * Read either the frame pointer (r14) or the stack pointer (r15). * NOTE: this MUST be inlined. 
@@ -237,10 +243,13 @@ struct dwarf_cie { unsigned long cie_pointer; - struct list_head link; - unsigned long flags; #define DWARF_CIE_Z_AUGMENTATION (1 << 0) + + /* linked-list entry if this CIE is from a module */ + struct list_head link; + + struct rb_node node; }; /** @@ -254,7 +263,11 @@ struct dwarf_fde { unsigned long address_range; unsigned char *instructions; unsigned char *end; + + /* linked-list entry if this FDE is from a module */ struct list_head link; + + struct rb_node node; }; /** @@ -364,6 +377,12 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); +extern void dwarf_free_frame(struct dwarf_frame *); + +extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *, + struct module *); +extern void module_dwarf_cleanup(struct module *); + #endif /* !__ASSEMBLY__ */ #define CFI_STARTPROC .cfi_startproc @@ -391,6 +410,10 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, static inline void dwarf_unwinder_init(void) { } + +#define module_dwarf_finalize(hdr, sechdrs, me) (0) +#define module_dwarf_cleanup(mod) do { } while (0) + #endif #endif /* CONFIG_DWARF_UNWINDER */ diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index ccb1d93bb043..ac04255022b6 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t; */ #define CORE_DUMP_USE_REGSET -#define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC #define ELF_EXEC_PAGESIZE PAGE_SIZE diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index 721fcc4d5e98..6e7cea453895 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h @@ -14,9 +14,9 @@ #define _ASM_FIXMAP_H #include <linux/kernel.h> +#include <linux/threads.h> #include <asm/page.h> #ifdef CONFIG_HIGHMEM -#include <linux/threads.h> #include <asm/kmap_types.h> #endif @@ -46,19 +46,38 @@ * fix-mapped? */ enum fixed_addresses { -#define FIX_N_COLOURS 16 + /* + * The FIX_CMAP entries are used by kmap_coherent() to get virtual + * addresses which are of a known color, and so their values are + * important. __fix_to_virt(FIX_CMAP_END - n) must give an address + * which is the same color as a page (n<<PAGE_SHIFT). + */ +#define FIX_N_COLOURS 8 FIX_CMAP_BEGIN, - FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, - FIX_UNCACHED, + FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1, + #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif + +#ifdef CONFIG_IOREMAP_FIXED + /* + * FIX_IOREMAP entries are useful for mapping physical address + * space before ioremap() is useable, e.g. really early in boot + * before kmalloc() is working. 
+ */ +#define FIX_N_IOREMAPS 32 + FIX_IOREMAP_BEGIN, + FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS, +#endif + __end_of_fixed_addresses }; extern void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags); +extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags); #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 1d3aee04b5cc..06c4281aab65 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h @@ -2,8 +2,8 @@ #define __ASM_SH_FPU_H #ifndef __ASSEMBLY__ -#include <linux/preempt.h> -#include <asm/ptrace.h> + +struct task_struct; #ifdef CONFIG_SH_FPU static inline void release_fpu(struct pt_regs *regs) @@ -16,59 +16,56 @@ static inline void grab_fpu(struct pt_regs *regs) regs->sr &= ~SR_FD; } -struct task_struct; - -extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); +extern void save_fpu(struct task_struct *__tsk); +extern void restore_fpu(struct task_struct *__tsk); +extern void fpu_state_restore(struct pt_regs *regs); +extern void __fpu_state_restore(void); #else - -#define release_fpu(regs) do { } while (0) -#define grab_fpu(regs) do { } while (0) - -static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs) -{ - clear_tsk_thread_flag(tsk, TIF_USEDFPU); -} +#define save_fpu(tsk) do { } while (0) +#define restore_fpu(tsk) do { } while (0) +#define release_fpu(regs) do { } while (0) +#define grab_fpu(regs) do { } while (0) +#define fpu_state_restore(regs) do { } while (0) +#define __fpu_state_restore(regs) do { } while (0) #endif struct user_regset; extern int do_fpu_inst(unsigned short, struct pt_regs *); +extern int init_fpu(struct task_struct *); extern int fpregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf); +static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) +{ + if (task_thread_info(tsk)->status & TS_USEDFPU) { + task_thread_info(tsk)->status &= ~TS_USEDFPU; + save_fpu(tsk); + release_fpu(regs); + } else + tsk->fpu_counter = 0; +} + static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) { preempt_disable(); - if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) - save_fpu(tsk, regs); + __unlazy_fpu(tsk, regs); preempt_enable(); } static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs) { preempt_disable(); - if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { - clear_tsk_thread_flag(tsk, TIF_USEDFPU); + if (task_thread_info(tsk)->status & TS_USEDFPU) { + task_thread_info(tsk)->status &= ~TS_USEDFPU; release_fpu(regs); } preempt_enable(); } -static inline int init_fpu(struct task_struct *tsk) -{ - if (tsk_used_math(tsk)) { - if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current) - unlazy_fpu(tsk, task_pt_regs(tsk)); - return 0; - } - - set_stopped_child_used_math(tsk); - return 0; -} - #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_FPU_H */ diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 12f3a31f20af..13e9966464c2 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -35,4 +35,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ +#ifndef __ASSEMBLY__ + +/* arch/sh/kernel/return_address.c */ +extern void *return_address(unsigned int); + +#define HAVE_ARCH_CALLER_ADDR + +#define CALLER_ADDR0 ((unsigned 
long)__builtin_return_address(0)) +#define CALLER_ADDR1 ((unsigned long)return_address(1)) +#define CALLER_ADDR2 ((unsigned long)return_address(2)) +#define CALLER_ADDR3 ((unsigned long)return_address(3)) +#define CALLER_ADDR4 ((unsigned long)return_address(4)) +#define CALLER_ADDR5 ((unsigned long)return_address(5)) +#define CALLER_ADDR6 ((unsigned long)return_address(6)) + +#endif /* __ASSEMBLY__ */ + #endif /* __ASM_SH_FTRACE_H */ diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h index 61f93da2c62e..f8d9a731e903 100644 --- a/arch/sh/include/asm/gpio.h +++ b/arch/sh/include/asm/gpio.h @@ -20,7 +20,7 @@ #endif #define ARCH_NR_GPIOS 512 -#include <asm-generic/gpio.h> +#include <linux/sh_pfc.h> #ifdef CONFIG_GPIOLIB @@ -53,84 +53,4 @@ static inline int irq_to_gpio(unsigned int irq) #endif /* CONFIG_GPIOLIB */ -typedef unsigned short pinmux_enum_t; -typedef unsigned short pinmux_flag_t; - -#define PINMUX_TYPE_NONE 0 -#define PINMUX_TYPE_FUNCTION 1 -#define PINMUX_TYPE_GPIO 2 -#define PINMUX_TYPE_OUTPUT 3 -#define PINMUX_TYPE_INPUT 4 -#define PINMUX_TYPE_INPUT_PULLUP 5 -#define PINMUX_TYPE_INPUT_PULLDOWN 6 - -#define PINMUX_FLAG_TYPE (0x7) -#define PINMUX_FLAG_WANT_PULLUP (1 << 3) -#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4) - -#define PINMUX_FLAG_DBIT_SHIFT 5 -#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT) -#define PINMUX_FLAG_DREG_SHIFT 10 -#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT) - -struct pinmux_gpio { - pinmux_enum_t enum_id; - pinmux_flag_t flags; -}; - -#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark } -#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0 - -struct pinmux_cfg_reg { - unsigned long reg, reg_width, field_width; - unsigned long *cnt; - pinmux_enum_t *enum_ids; -}; - -#define PINMUX_CFG_REG(name, r, r_width, f_width) \ - .reg = r, .reg_width = r_width, .field_width = f_width, \ - .cnt = (unsigned long [r_width / f_width]) {}, \ - .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \ - -struct pinmux_data_reg { - unsigned long reg, reg_width, reg_shadow; - pinmux_enum_t *enum_ids; -}; - -#define PINMUX_DATA_REG(name, r, r_width) \ - .reg = r, .reg_width = r_width, \ - .enum_ids = (pinmux_enum_t [r_width]) \ - -struct pinmux_range { - pinmux_enum_t begin; - pinmux_enum_t end; - pinmux_enum_t force; -}; - -struct pinmux_info { - char *name; - pinmux_enum_t reserved_id; - struct pinmux_range data; - struct pinmux_range input; - struct pinmux_range input_pd; - struct pinmux_range input_pu; - struct pinmux_range output; - struct pinmux_range mark; - struct pinmux_range function; - - unsigned first_gpio, last_gpio; - - struct pinmux_gpio *gpios; - struct pinmux_cfg_reg *cfg_regs; - struct pinmux_data_reg *data_regs; - - pinmux_enum_t *gpio_data; - unsigned int gpio_data_size; - - unsigned long *gpio_in_use; - struct gpio_chip chip; -}; - -int register_pinmux(struct pinmux_info *pip); - #endif /* __ASM_SH_GPIO_H */ diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h index a5be4afa790b..48b191313a99 100644 --- a/arch/sh/include/asm/hardirq.h +++ b/arch/sh/include/asm/hardirq.h @@ -1,9 +1,16 @@ #ifndef __ASM_SH_HARDIRQ_H #define __ASM_SH_HARDIRQ_H -extern void ack_bad_irq(unsigned int irq); -#define ack_bad_irq ack_bad_irq +#include <linux/threads.h> +#include <linux/irq.h> + +typedef struct { + unsigned int __softirq_pending; + unsigned int __nmi_count; /* arch dependent */ +} ____cacheline_aligned irq_cpustat_t; -#include <asm-generic/hardirq.h> +#include 
<linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ + +extern void ack_bad_irq(unsigned int irq); #endif /* __ASM_SH_HARDIRQ_H */ diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h new file mode 100644 index 000000000000..965dd780d51b --- /dev/null +++ b/arch/sh/include/asm/hw_breakpoint.h @@ -0,0 +1,67 @@ +#ifndef __ASM_SH_HW_BREAKPOINT_H +#define __ASM_SH_HW_BREAKPOINT_H + +#ifdef __KERNEL__ +#define __ARCH_HW_BREAKPOINT_H + +#include <linux/kdebug.h> +#include <linux/types.h> + +struct arch_hw_breakpoint { + char *name; /* Contains name of the symbol to set bkpt */ + unsigned long address; + u16 len; + u16 type; +}; + +enum { + SH_BREAKPOINT_READ = (1 << 1), + SH_BREAKPOINT_WRITE = (1 << 2), + SH_BREAKPOINT_RW = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE, + + SH_BREAKPOINT_LEN_1 = (1 << 12), + SH_BREAKPOINT_LEN_2 = (1 << 13), + SH_BREAKPOINT_LEN_4 = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2, + SH_BREAKPOINT_LEN_8 = (1 << 14), +}; + +struct sh_ubc { + const char *name; + unsigned int num_events; + unsigned int trap_nr; + void (*enable)(struct arch_hw_breakpoint *, int); + void (*disable)(struct arch_hw_breakpoint *, int); + void (*enable_all)(unsigned long); + void (*disable_all)(void); + unsigned long (*active_mask)(void); + unsigned long (*triggered_mask)(void); + void (*clear_triggered_mask)(unsigned long); + struct clk *clk; /* optional interface clock / MSTP bit */ +}; + +struct perf_event; +struct task_struct; +struct pmu; + +/* Maximum number of UBC channels */ +#define HBP_NUM 2 + +/* arch/sh/kernel/hw_breakpoint.c */ +extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len); +extern int arch_validate_hwbkpt_settings(struct perf_event *bp, + struct task_struct *tsk); +extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data); + +int arch_install_hw_breakpoint(struct perf_event *bp); +void arch_uninstall_hw_breakpoint(struct perf_event *bp); +void hw_breakpoint_pmu_read(struct perf_event *bp); +void hw_breakpoint_pmu_unthrottle(struct perf_event *bp); + +extern void arch_fill_perf_breakpoint(struct perf_event *bp); +extern int register_sh_ubc(struct sh_ubc *); + +extern struct pmu perf_ops_bp; + +#endif /* __KERNEL__ */ +#endif /* __ASM_SH_HW_BREAKPOINT_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 5be45ea4dfec..f689554e17c1 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -22,6 +22,7 @@ * for old compat code for I/O offseting to SuperIOs, all of which are * better handled through the machvec ioport mapping routines these days. */ +#include <linux/errno.h> #include <asm/cache.h> #include <asm/system.h> #include <asm/addrspace.h> @@ -79,28 +80,81 @@ #define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) #define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); }) -/* SuperH on-chip I/O functions */ -#define ctrl_inb __raw_readb -#define ctrl_inw __raw_readw -#define ctrl_inl __raw_readl -#define ctrl_inq __raw_readq +/* + * Legacy SuperH on-chip I/O functions + * + * These are all deprecated, all new (and especially cross-platform) code + * should be using the __raw_xxx() routines directly. 
+ */ +static inline u8 __deprecated ctrl_inb(unsigned long addr) +{ + return __raw_readb(addr); +} + +static inline u16 __deprecated ctrl_inw(unsigned long addr) +{ + return __raw_readw(addr); +} + +static inline u32 __deprecated ctrl_inl(unsigned long addr) +{ + return __raw_readl(addr); +} + +static inline u64 __deprecated ctrl_inq(unsigned long addr) +{ + return __raw_readq(addr); +} + +static inline void __deprecated ctrl_outb(u8 v, unsigned long addr) +{ + __raw_writeb(v, addr); +} -#define ctrl_outb __raw_writeb -#define ctrl_outw __raw_writew -#define ctrl_outl __raw_writel -#define ctrl_outq __raw_writeq +static inline void __deprecated ctrl_outw(u16 v, unsigned long addr) +{ + __raw_writew(v, addr); +} + +static inline void __deprecated ctrl_outl(u32 v, unsigned long addr) +{ + __raw_writel(v, addr); +} + +static inline void __deprecated ctrl_outq(u64 v, unsigned long addr) +{ + __raw_writeq(v, addr); +} + +extern unsigned long generic_io_base; static inline void ctrl_delay(void) { -#ifdef CONFIG_CPU_SH4 - __raw_readw(CCN_PVR); -#elif defined(P2SEG) - __raw_readw(P2SEG); -#else -#error "Need a dummy address for delay" -#endif + __raw_readw(generic_io_base); } +#define __BUILD_UNCACHED_IO(bwlq, type) \ +static inline type read##bwlq##_uncached(unsigned long addr) \ +{ \ + type ret; \ + jump_to_uncached(); \ + ret = __raw_read##bwlq(addr); \ + back_to_cached(); \ + return ret; \ +} \ + \ +static inline void write##bwlq##_uncached(type v, unsigned long addr) \ +{ \ + jump_to_uncached(); \ + __raw_write##bwlq(v, addr); \ + back_to_cached(); \ +} + +__BUILD_UNCACHED_IO(b, u8) +__BUILD_UNCACHED_IO(w, u16) +__BUILD_UNCACHED_IO(l, u32) +__BUILD_UNCACHED_IO(q, u64) + #define __BUILD_MEMORY_STRING(bwlq, type) \ \ static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ @@ -186,8 +240,6 @@ __BUILD_MEMORY_STRING(q, u64) #define IO_SPACE_LIMIT 0xffffffff -extern unsigned long generic_io_base; - /* * This function provides a method for the generic case where a * board-specific ioport_map simply needs to return the port + some @@ -239,23 +291,22 @@ unsigned long long poke_real_address_q(unsigned long long addr, * doesn't exist, so everything must go through page tables. */ #ifdef CONFIG_MMU -void __iomem *__ioremap(unsigned long offset, unsigned long size, - unsigned long flags); +void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size, + pgprot_t prot, void *caller); void __iounmap(void __iomem *addr); static inline void __iomem * -__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) +__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot) { -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) - unsigned long last_addr = offset + size - 1; -#endif - void __iomem *ret; + return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); +} - ret = __ioremap_trapped(offset, size); - if (ret) - return ret; +static inline void __iomem * +__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) +{ +#ifdef CONFIG_29BIT + phys_addr_t last_addr = offset + size - 1; -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. @@ -263,7 +314,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) * mapping must be done by the PMB or by using page tables. 
*/ if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { - if (unlikely(flags & _PAGE_CACHABLE)) + if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE)) return (void __iomem *)P1SEGADDR(offset); return (void __iomem *)P2SEGADDR(offset); @@ -274,25 +325,67 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) return (void __iomem *)P4SEGADDR(offset); #endif - return __ioremap(offset, size, flags); + return NULL; +} + +static inline void __iomem * +__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot) +{ + void __iomem *ret; + + ret = __ioremap_trapped(offset, size); + if (ret) + return ret; + + ret = __ioremap_29bit(offset, size, prot); + if (ret) + return ret; + + return __ioremap(offset, size, prot); } #else -#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset)) +#define __ioremap(offset, size, prot) ((void __iomem *)(offset)) +#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset)) #define __iounmap(addr) do { } while (0) #endif /* CONFIG_MMU */ -#define ioremap(offset, size) \ - __ioremap_mode((offset), (size), 0) -#define ioremap_nocache(offset, size) \ - __ioremap_mode((offset), (size), 0) -#define ioremap_cache(offset, size) \ - __ioremap_mode((offset), (size), _PAGE_CACHABLE) -#define p3_ioremap(offset, size, flags) \ - __ioremap((offset), (size), (flags)) -#define ioremap_prot(offset, size, flags) \ - __ioremap_mode((offset), (size), (flags)) -#define iounmap(addr) \ - __iounmap((addr)) +static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) +{ + return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); +} + +static inline void __iomem * +ioremap_cache(phys_addr_t offset, unsigned long size) +{ + return __ioremap_mode(offset, size, PAGE_KERNEL); +} + +#ifdef CONFIG_HAVE_IOREMAP_PROT +static inline void __iomem * +ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags) +{ + return __ioremap_mode(offset, size, __pgprot(flags)); +} +#endif + +#ifdef CONFIG_IOREMAP_FIXED +extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t); +extern int iounmap_fixed(void __iomem *); +extern void ioremap_fixed_init(void); +#else +static inline void __iomem * +ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) +{ + BUG(); + return NULL; +} + +static inline void ioremap_fixed_init(void) { } +static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } +#endif + +#define ioremap_nocache ioremap +#define iounmap __iounmap #define maybebadio(port) \ printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h index 46e71da5be6b..a741153b41c2 100644 --- a/arch/sh/include/asm/irqflags.h +++ b/arch/sh/include/asm/irqflags.h @@ -1,34 +1,9 @@ #ifndef __ASM_SH_IRQFLAGS_H #define __ASM_SH_IRQFLAGS_H -#ifdef CONFIG_SUPERH32 -#include "irqflags_32.h" -#else -#include "irqflags_64.h" -#endif +#define RAW_IRQ_DISABLED 0xf0 +#define RAW_IRQ_ENABLED 0x00 -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return (flags != 0); -} - -static inline int raw_irqs_disabled(void) -{ - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); -} - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline void raw_local_irq_restore(unsigned long flags) -{ - if ((flags & 0xf0) != 0xf0) - 
raw_local_irq_enable(); -} +#include <asm-generic/irqflags.h> #endif /* __ASM_SH_IRQFLAGS_H */ diff --git a/arch/sh/include/asm/irqflags_32.h b/arch/sh/include/asm/irqflags_32.h deleted file mode 100644 index 60218f541340..000000000000 --- a/arch/sh/include/asm/irqflags_32.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef __ASM_SH_IRQFLAGS_32_H -#define __ASM_SH_IRQFLAGS_32_H - -static inline void raw_local_irq_enable(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %1, %0\n\t" -#ifdef CONFIG_CPU_HAS_SR_RB - "stc r6_bank, %1\n\t" - "or %1, %0\n\t" -#endif - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x000000f0) - : "memory" - ); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); -} - -static inline void set_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or %2, %0\n\t" - "and %3, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "r" (0x10000000), "r" (0xffffff0f) - : "memory" - ); -} - -static inline void clear_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %2, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x10000000) - : "memory" - ); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags, __dummy; - - __asm__ __volatile__ ( - "stc sr, %1\n\t" - "mov %1, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - "mov %1, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags), "=&r" (__dummy) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -#endif /* __ASM_SH_IRQFLAGS_32_H */ diff --git a/arch/sh/include/asm/irqflags_64.h b/arch/sh/include/asm/irqflags_64.h deleted file mode 100644 index 88f65222c1d4..000000000000 --- a/arch/sh/include/asm/irqflags_64.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef __ASM_SH_IRQFLAGS_64_H -#define __ASM_SH_IRQFLAGS_64_H - -#include <cpu/registers.h> - -#define SR_MASK_LL 0x00000000000000f0LL -#define SR_BL_LL 0x0000000010000000LL - -static inline void raw_local_irq_enable(void) -{ - unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long long __dummy0, __dummy1 = SR_MASK_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline void set_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); - -} - -static inline void clear_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long long __dummy = SR_MASK_LL; - unsigned long 
flags; - - __asm__ __volatile__ ( - "getcon " __SR ", %0\n\t" - "and %0, %1, %0" - : "=&r" (flags) - : "r" (__dummy)); - - return flags; -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long long __dummy0, __dummy1 = SR_MASK_LL; - unsigned long flags; - - __asm__ __volatile__ ( - "getcon " __SR ", %1\n\t" - "or %1, r63, %0\n\t" - "or %1, %2, %1\n\t" - "putcon %1, " __SR "\n\t" - "and %0, %2, %0" - : "=&r" (flags), "=&r" (__dummy0) - : "r" (__dummy1)); - - return flags; -} - -#endif /* __ASM_SH_IRQFLAGS_64_H */ diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h index 985219f9759e..5f6d2e9ccb7c 100644 --- a/arch/sh/include/asm/kdebug.h +++ b/arch/sh/include/asm/kdebug.h @@ -6,6 +6,8 @@ enum die_val { DIE_TRAP, DIE_NMI, DIE_OOPS, + DIE_BREAKPOINT, + DIE_SSTEP, }; #endif /* __ASM_SH_KDEBUG_H */ diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index 84dd37761f56..9c30955630ff 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -12,7 +12,7 @@ #include <linux/types.h> #include <linux/time.h> -#include <asm/machtypes.h> +#include <generated/machtypes.h> struct sh_machine_vector { void (*mv_setup)(char **cmdline_p); diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index f5963037c9d6..19fe84550b49 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -7,12 +7,18 @@ #define PMB_PASCR 0xff000070 #define PMB_IRMCR 0xff000078 +#define PASCR_SE 0x80000000 + #define PMB_ADDR 0xf6100000 #define PMB_DATA 0xf7100000 -#define PMB_ENTRY_MAX 16 + +#define NR_PMB_ENTRIES 16 + #define PMB_E_MASK 0x0000000f #define PMB_E_SHIFT 8 +#define PMB_PFN_MASK 0xff000000 + #define PMB_SZ_16M 0x00000000 #define PMB_SZ_64M 0x00000010 #define PMB_SZ_128M 0x00000080 @@ -21,11 +27,15 @@ #define PMB_C 0x00000008 #define PMB_WT 0x00000001 #define PMB_UB 0x00000200 +#define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB) #define PMB_V 0x00000100 #define PMB_NO_ENTRY (-1) #ifndef __ASSEMBLY__ +#include <linux/errno.h> +#include <linux/threads.h> +#include <asm/page.h> /* Default "unsigned long" context */ typedef unsigned long mm_context_id_t[NR_CPUS]; @@ -43,36 +53,47 @@ typedef struct { #endif } mm_context_t; -struct pmb_entry; +#ifdef CONFIG_PMB +/* arch/sh/mm/pmb.c */ +bool __in_29bit_mode(void); + +void pmb_init(void); +int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys, + unsigned long size, pgprot_t prot); +void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, + pgprot_t prot, void *caller); +int pmb_unmap(void __iomem *addr); -struct pmb_entry { - unsigned long vpn; - unsigned long ppn; - unsigned long flags; +#else - /* - * 0 .. 
NR_PMB_ENTRIES for specific entry selection, or - * PMB_NO_ENTRY to search for a free one - */ - int entry; +static inline void __iomem * +pmb_remap_caller(phys_addr_t phys, unsigned long size, + pgprot_t prot, void *caller) +{ + return NULL; +} - struct pmb_entry *next; - /* Adjacent entry link for contiguous multi-entry mappings */ - struct pmb_entry *link; -}; +static inline int pmb_unmap(void __iomem *addr) +{ + return -EINVAL; +} + +#define pmb_init(addr) do { } while (0) + +#ifdef CONFIG_29BIT +#define __in_29bit_mode() (1) +#else +#define __in_29bit_mode() (0) +#endif + +#endif /* CONFIG_PMB */ + +static inline void __iomem * +pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot) +{ + return pmb_remap_caller(phys, size, prot, __builtin_return_address(0)); +} -/* arch/sh/mm/pmb.c */ -int __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int *entry); -int set_pmb_entry(struct pmb_entry *pmbe); -void clear_pmb_entry(struct pmb_entry *pmbe); -struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, - unsigned long flags); -void pmb_free(struct pmb_entry *pmbe); -long pmb_remap(unsigned long virt, unsigned long phys, - unsigned long size, unsigned long flags); -void pmb_unmap(unsigned long addr); #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ - diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 41080b173a7a..384c7471a374 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -158,7 +158,7 @@ static inline void enable_mmu(void) unsigned int cpu = smp_processor_id(); /* Enable MMU */ - ctrl_outl(MMU_CONTROL_INIT, MMUCR); + __raw_writel(MMU_CONTROL_INIT, MMUCR); ctrl_barrier(); if (asid_cache(cpu) == NO_CONTEXT) @@ -171,9 +171,9 @@ static inline void disable_mmu(void) { unsigned long cr; - cr = ctrl_inl(MMUCR); + cr = __raw_readl(MMUCR); cr &= ~MMU_CONTROL_INIT; - ctrl_outl(cr, MMUCR); + __raw_writel(cr, MMUCR); ctrl_barrier(); } diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h index 8ef800c549ab..10e2e17210d2 100644 --- a/arch/sh/include/asm/mmu_context_32.h +++ b/arch/sh/include/asm/mmu_context_32.h @@ -49,11 +49,11 @@ static inline unsigned long get_asid(void) /* MMU_TTB is used for optimizing the fault handling. 
*/ static inline void set_TTB(pgd_t *pgd) { - ctrl_outl((unsigned long)pgd, MMU_TTB); + __raw_writel((unsigned long)pgd, MMU_TTB); } static inline pgd_t *get_TTB(void) { - return (pgd_t *)ctrl_inl(MMU_TTB); + return (pgd_t *)__raw_readl(MMU_TTB); } #endif /* __ASM_SH_MMU_CONTEXT_32_H */ diff --git a/arch/sh/include/asm/module.h b/arch/sh/include/asm/module.h index 068bf1659750..b7927de86f9f 100644 --- a/arch/sh/include/asm/module.h +++ b/arch/sh/include/asm/module.h @@ -1,7 +1,22 @@ #ifndef _ASM_SH_MODULE_H #define _ASM_SH_MODULE_H -#include <asm-generic/module.h> +struct mod_arch_specific { +#ifdef CONFIG_DWARF_UNWINDER + struct list_head fde_list; + struct list_head cie_list; +#endif +}; + +#ifdef CONFIG_64BIT +#define Elf_Shdr Elf64_Shdr +#define Elf_Sym Elf64_Sym +#define Elf_Ehdr Elf64_Ehdr +#else +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr +#endif #ifdef CONFIG_CPU_LITTLE_ENDIAN # ifdef CONFIG_CPU_SH2 diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 81bffc0d6860..d71feb359304 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -45,6 +45,7 @@ #endif #ifndef __ASSEMBLY__ +#include <asm/uncached.h> extern unsigned long shm_align_mask; extern unsigned long max_low_pfn, min_low_pfn; @@ -56,7 +57,6 @@ pages_do_alias(unsigned long addr1, unsigned long addr2) return (addr1 ^ addr2) & shm_align_mask; } - #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, void *from); @@ -88,7 +88,7 @@ typedef struct { unsigned long pgd; } pgd_t; #define __pte(x) ((pte_t) { (x) } ) #else typedef struct { unsigned long long pte_low; } pte_t; -typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct { unsigned long long pgprot; } pgprot_t; typedef struct { unsigned long pgd; } pgd_t; #define pte_val(x) ((x).pte_low) #define __pte(x) ((pte_t) { (x) } ) @@ -127,12 +127,7 @@ typedef struct page *pgtable_t; * is not visible (it is part of the PMB mapping) and so needs to be * added or subtracted as required. 
*/ -#if defined(CONFIG_PMB_FIXED) -/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */ -#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START)) -#define __pa(x) ((unsigned long)(x) - PMB_OFFSET) -#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET)) -#elif defined(CONFIG_32BIT) +#ifdef CONFIG_PMB #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START)) #else @@ -140,6 +135,14 @@ typedef struct page *pgtable_t; #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #endif +#ifdef CONFIG_UNCACHED_MAPPING +#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start) +#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET) +#else +#define UNCAC_ADDR(addr) ((addr)) +#define CAC_ADDR(addr) ((addr)) +#endif + #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 4163950cd1c6..8bd952fcf3ba 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -3,8 +3,6 @@ #ifdef __KERNEL__ -#include <linux/dma-mapping.h> - /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ @@ -17,20 +15,49 @@ */ struct pci_channel { struct pci_channel *next; + struct pci_bus *bus; struct pci_ops *pci_ops; - struct resource *io_resource; - struct resource *mem_resource; + + struct resource *resources; + unsigned int nr_resources; unsigned long io_offset; unsigned long mem_offset; unsigned long reg_base; - unsigned long io_map_base; + + unsigned int index; + unsigned int need_domain_info; + + /* Optional error handling */ + struct timer_list err_timer, serr_timer; + unsigned int err_irq, serr_irq; }; -extern void register_pci_controller(struct pci_channel *hose); +/* arch/sh/drivers/pci/pci.c */ +extern int register_pci_controller(struct pci_channel *hose); +extern void pcibios_report_status(unsigned int status_mask, int warn); + +/* arch/sh/drivers/pci/common.c */ +extern int early_read_config_byte(struct pci_channel *hose, int top_bus, + int bus, int devfn, int offset, u8 *value); +extern int early_read_config_word(struct pci_channel *hose, int top_bus, + int bus, int devfn, int offset, u16 *value); +extern int early_read_config_dword(struct pci_channel *hose, int top_bus, + int bus, int devfn, int offset, u32 *value); +extern int early_write_config_byte(struct pci_channel *hose, int top_bus, + int bus, int devfn, int offset, u8 value); +extern int early_write_config_word(struct pci_channel *hose, int top_bus, + int bus, int devfn, int offset, u16 value); +extern int early_write_config_dword(struct pci_channel *hose, int top_bus, + int bus, int devfn, int offset, u32 value); +extern void pcibios_enable_timers(struct pci_channel *hose); +extern unsigned int pcibios_handle_status_errors(unsigned long addr, + unsigned int status, struct pci_channel *hose); +extern int pci_is_66mhz_capable(struct pci_channel *hose, + int top_bus, int current_bus); extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM; @@ -54,38 +81,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) * address space. The networking and block device layers use * this boolean for bounce buffer decisions. 
*/ -#define PCI_DMA_BUS_IS_PHYS (1) - -#include <linux/types.h> -#include <linux/slab.h> -#include <asm/scatterlist.h> -#include <linux/string.h> -#include <asm/io.h> - -/* pci_unmap_{single,page} being a nop depends upon the - * configuration. - */ -#ifdef CONFIG_SH_PCIDMA_NONCOHERENT -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) -#else -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) -#define pci_unmap_addr(PTR, ADDR_NAME) (0) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) -#define pci_unmap_len(PTR, LEN_NAME) (0) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) -#endif +#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) #ifdef CONFIG_PCI /* @@ -113,20 +109,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif -#ifdef CONFIG_SUPERH32 -/* - * If we're on an SH7751 or SH7780 PCI controller, PCI memory is mapped - * at the end of the address space in a special non-translatable area. - */ -#define PCI_MEM_FIXED_START 0xfd000000 -#define PCI_MEM_FIXED_END (PCI_MEM_FIXED_START + 0x01000000) - -#define is_pci_memory_fixed_range(s, e) \ - ((s) >= PCI_MEM_FIXED_START && (e) < PCI_MEM_FIXED_END) -#else -#define is_pci_memory_fixed_range(s, e) (0) -#endif - /* Board-specific fixup routines. */ int pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin); @@ -136,6 +118,14 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev, extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); +#define pci_domain_nr(bus) ((struct pci_channel *)(bus)->sysdata)->index + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_channel *hose = bus->sysdata; + return hose->need_domain_info; +} + /* Chances are this interrupt is wired PC-style ... */ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h index 11a302297ab7..3d0c9f36d150 100644 --- a/arch/sh/include/asm/perf_event.h +++ b/arch/sh/include/asm/perf_event.h @@ -1,8 +1,35 @@ #ifndef __ASM_SH_PERF_EVENT_H #define __ASM_SH_PERF_EVENT_H -/* SH only supports software events through this interface. */ -static inline void set_perf_event_pending(void) {} +struct hw_perf_event; + +#define MAX_HWEVENTS 2 + +struct sh_pmu { + const char *name; + unsigned int num_events; + void (*disable_all)(void); + void (*enable_all)(void); + void (*enable)(struct hw_perf_event *, int); + void (*disable)(struct hw_perf_event *, int); + u64 (*read)(int); + int (*event_map)(int); + unsigned int max_events; + unsigned long raw_event_mask; + const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; +}; + +/* arch/sh/kernel/perf_event.c */ +extern int register_sh_pmu(struct sh_pmu *); +extern int reserve_pmc_hardware(void); +extern void release_pmc_hardware(void); + +static inline void set_perf_event_pending(void) +{ + /* Nothing to see here, move along. 
*/ +} #define PERF_EVENT_INDEX_OFFSET 0 diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 63ca37bd9a95..8c00785c60d5 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -4,8 +4,16 @@ #include <linux/quicklist.h> #include <asm/page.h> -#define QUICK_PGD 0 /* We preserve special mappings over free */ -#define QUICK_PT 1 /* Other page table pages that are zero on free */ +#define QUICK_PT 0 /* Other page table pages that are zero on free */ + +extern pgd_t *pgd_alloc(struct mm_struct *); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); + +#if PAGETABLE_LEVELS > 2 +extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); +extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); +#endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) @@ -20,28 +28,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, } #define pmd_pgtable(pmd) pmd_page(pmd) -static inline void pgd_ctor(void *x) -{ - pgd_t *pgd = x; - - memcpy(pgd + USER_PTRS_PER_PGD, - swapper_pg_dir + USER_PTRS_PER_PGD, - (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); -} - /* * Allocate and free page tables. */ -static inline pgd_t *pgd_alloc(struct mm_struct *mm) -{ - return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor); -} - -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - quicklist_free(QUICK_PGD, NULL, pgd); -} - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { @@ -81,7 +70,6 @@ do { \ static inline void check_pgt_cache(void) { - quicklist_trim(QUICK_PGD, NULL, 25, 16); quicklist_trim(QUICK_PT, NULL, 25, 16); } diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h new file mode 100644 index 000000000000..19bd89db17e7 --- /dev/null +++ b/arch/sh/include/asm/pgtable-2level.h @@ -0,0 +1,23 @@ +#ifndef __ASM_SH_PGTABLE_2LEVEL_H +#define __ASM_SH_PGTABLE_2LEVEL_H + +#include <asm-generic/pgtable-nopmd.h> + +/* + * traditional two-level paging structure + */ +#define PAGETABLE_LEVELS 2 + +/* PTE bits */ +#define PTE_MAGNITUDE 2 /* 32-bit PTEs */ + +#define PTE_SHIFT PAGE_SHIFT +#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) + +/* PGD bits */ +#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) + +#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE)) +#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) + +#endif /* __ASM_SH_PGTABLE_2LEVEL_H */ diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h new file mode 100644 index 000000000000..249a985d9648 --- /dev/null +++ b/arch/sh/include/asm/pgtable-3level.h @@ -0,0 +1,56 @@ +#ifndef __ASM_SH_PGTABLE_3LEVEL_H +#define __ASM_SH_PGTABLE_3LEVEL_H + +#include <asm-generic/pgtable-nopud.h> + +/* + * Some cores need a 3-level page table layout, for example when using + * 64-bit PTEs and 4K pages. 
+ */ +#define PAGETABLE_LEVELS 3 + +#define PTE_MAGNITUDE 3 /* 64-bit PTEs on SH-X2 TLB */ + +/* PGD bits */ +#define PGDIR_SHIFT 30 + +#define PTRS_PER_PGD 4 +#define USER_PTRS_PER_PGD 2 + +/* PMD bits */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +#define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE) + +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) + +typedef struct { unsigned long long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) } ) + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return pud_val(pud); +} + +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); +} + +#define pud_none(x) (!pud_val(x)) +#define pud_present(x) (pud_val(x)) +#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0) +#define pud_bad(x) (pud_val(x) & ~PAGE_MASK) + +/* + * (puds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0) + +#endif /* __ASM_SH_PGTABLE_3LEVEL_H */ diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 4f3efa7d5a64..02f77450cd8f 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -12,7 +12,11 @@ #ifndef __ASM_SH_PGTABLE_H #define __ASM_SH_PGTABLE_H -#include <asm-generic/pgtable-nopmd.h> +#ifdef CONFIG_X2TLB +#include <asm/pgtable-3level.h> +#else +#include <asm/pgtable-2level.h> +#endif #include <asm/page.h> #ifndef __ASSEMBLY__ @@ -51,37 +55,39 @@ static inline unsigned long long neff_sign_extend(unsigned long val) #define NPHYS_SIGN (1LL << (NPHYS - 1)) #define NPHYS_MASK (-1LL << NPHYS) -/* - * traditional two-level paging structure - */ -/* PTE bits */ -#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64) -# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ -#else -# define PTE_MAGNITUDE 2 /* 32-bit PTEs */ -#endif -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) - -/* PGD bits */ -#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) /* Entries per level */ #define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE)) -#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) -#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 -#ifdef CONFIG_32BIT -#define PHYS_ADDR_MASK 0xffffffff +#define PHYS_ADDR_MASK29 0x1fffffff +#define PHYS_ADDR_MASK32 0xffffffff + +#ifdef CONFIG_PMB +static inline unsigned long phys_addr_mask(void) +{ + /* Is the MMU in 29bit mode? 
*/ + if (__in_29bit_mode()) + return PHYS_ADDR_MASK29; + + return PHYS_ADDR_MASK32; +} +#elif defined(CONFIG_32BIT) +static inline unsigned long phys_addr_mask(void) +{ + return PHYS_ADDR_MASK32; +} #else -#define PHYS_ADDR_MASK 0x1fffffff +static inline unsigned long phys_addr_mask(void) +{ + return PHYS_ADDR_MASK29; +} #endif -#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) +#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK) #define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) #ifdef CONFIG_SUPERH32 @@ -135,9 +141,9 @@ typedef pte_t *pte_addr_t; #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) /* - * No page table caches to initialise + * Initialise the page table caches */ -#define pgtable_cache_init() do { } while (0) +extern void pgtable_cache_init(void); struct vm_area_struct; @@ -147,8 +153,9 @@ extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte); static inline void -update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + pte_t pte = *ptep; __update_cache(vma, address, pte); __update_tlb(vma, address, pte); } diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h index c0d359ce337b..e172d696e52b 100644 --- a/arch/sh/include/asm/pgtable_32.h +++ b/arch/sh/include/asm/pgtable_32.h @@ -71,6 +71,8 @@ #define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */ #define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */ +#define _PAGE_EXT_WIRED 0x4000 /* software: Wire TLB entry */ + /* Wrapper for extended mode pgprot twiddling */ #define _PAGE_EXT(x) ((unsigned long long)(x) << 32) @@ -108,7 +110,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x) #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) #endif -#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS)) +#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS)) /* Hardware flags, page size encoding */ #if !defined(CONFIG_MMU) @@ -141,12 +143,14 @@ static inline unsigned long copy_ptea_attributes(unsigned long x) # elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) # define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3) # endif +# define _PAGE_WIRED (_PAGE_EXT(_PAGE_EXT_WIRED)) #else # if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) # define _PAGE_SZHUGE (_PAGE_SZ1) # elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) # define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) # endif +# define _PAGE_WIRED (0) #endif /* @@ -344,7 +348,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte) #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) #ifdef CONFIG_X2TLB -#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) +#define pte_write(pte) \ + ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)) #else #define pte_write(pte) ((pte).pte_low & _PAGE_RW) #endif @@ -358,7 +363,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } * individually toggled (and user permissions are entirely decoupled from * kernel permissions), we attempt to couple them a bit more sanely here. 
*/ -PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); +PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)); PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); #else diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h index 17cdbecc3adc..0ee46776dad6 100644 --- a/arch/sh/include/asm/pgtable_64.h +++ b/arch/sh/include/asm/pgtable_64.h @@ -43,11 +43,6 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval) } #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) -static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) -{ - pmd_val(*pmdp) = (unsigned long) ptep; -} - /* * PGD defines. Top level. */ @@ -128,8 +123,21 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) #define _PAGE_DIRTY 0x400 /* software: page accessed in write */ #define _PAGE_ACCESSED 0x800 /* software: page referenced */ +/* Wrapper for extended mode pgprot twiddling */ +#define _PAGE_EXT(x) ((unsigned long long)(x) << 32) + +/* + * We can use the sign-extended bits in the PTEL to get 32 bits of + * software flags. This works for now because no implementations uses + * anything above the PPN field. + */ +#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */ + +#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \ + _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED) + /* Mask which drops software flags */ -#define _PAGE_FLAGS_HARDWARE_MASK 0xfffffffffffff3dbLL +#define _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS)) /* * HugeTLB support @@ -203,12 +211,6 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) #define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE) /* - * Handling allocation failures during page table setup. - */ -extern void __handle_bad_pmd_kernel(pmd_t * pmd); -#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x) - -/* * PTE level access routines. 
* * Note1: diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 017e0c1807b2..9605e062840f 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -98,13 +98,34 @@ extern struct sh_cpuinfo cpu_data[]; /* Forward decl */ struct seq_operations; +struct task_struct; extern struct pt_regs fake_swapper_regs; +/* arch/sh/kernel/process.c */ +extern unsigned int xstate_size; +extern void free_thread_xstate(struct task_struct *); +extern struct kmem_cache *task_xstate_cachep; + +/* arch/sh/mm/alignment.c */ +extern int get_unalign_ctl(struct task_struct *, unsigned long addr); +extern int set_unalign_ctl(struct task_struct *, unsigned int val); + +#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr)) +#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val)) + +/* arch/sh/mm/init.c */ +extern unsigned int mem_init_done; + /* arch/sh/kernel/setup.c */ const char *get_cpu_subtype(struct sh_cpuinfo *c); extern const struct seq_operations cpuinfo_op; +/* thread_struct flags */ +#define SH_THREAD_UAC_NOPRINT (1 << 0) +#define SH_THREAD_UAC_SIGBUS (1 << 1) +#define SH_THREAD_UAC_MASK (SH_THREAD_UAC_NOPRINT | SH_THREAD_UAC_SIGBUS) + /* processor boot mode configuration */ #define MODE_PIN0 (1 << 0) #define MODE_PIN1 (1 << 1) diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 9a8714945dc9..572b4eb09493 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -14,6 +14,7 @@ #include <asm/page.h> #include <asm/types.h> #include <asm/ptrace.h> +#include <asm/hw_breakpoint.h> /* * Default implementation of macro that returns current @@ -56,6 +57,7 @@ asmlinkage void __init sh_cpu_init(void); #define SR_DSP 0x00001000 #define SR_IMASK 0x000000f0 #define SR_FD 0x00008000 +#define SR_MD 0x40000000 /* * DSP structure and data @@ -89,9 +91,9 @@ struct sh_fpu_soft_struct { unsigned long entry_pc; }; -union sh_fpu_union { - struct sh_fpu_hard_struct hard; - struct sh_fpu_soft_struct soft; +union thread_xstate { + struct sh_fpu_hard_struct hardfpu; + struct sh_fpu_soft_struct softfpu; }; struct thread_struct { @@ -99,44 +101,36 @@ struct thread_struct { unsigned long sp; unsigned long pc; - /* Hardware debugging registers */ - unsigned long ubc_pc; + /* Various thread flags, see SH_THREAD_xxx */ + unsigned long flags; - /* floating point info */ - union sh_fpu_union fpu; + /* Save middle states of ptrace breakpoints */ + struct perf_event *ptrace_bps[HBP_NUM]; #ifdef CONFIG_SH_DSP /* Dsp status information */ struct sh_dsp_struct dsp_status; #endif -}; -/* Count of active tasks with UBC settings */ -extern int ubc_usercnt; + /* Extended processor state */ + union thread_xstate *xstate; +}; #define INIT_THREAD { \ .sp = sizeof(init_stack) + (long) &init_stack, \ + .flags = 0, \ } -/* - * Do necessary setup to start up a newly executed thread. - */ -#define start_thread(_regs, new_pc, new_sp) \ - set_fs(USER_DS); \ - _regs->pr = 0; \ - _regs->sr = SR_FD; /* User mode. */ \ - _regs->pc = new_pc; \ - _regs->regs[15] = new_sp - /* Forward declaration, a strange C thing */ struct task_struct; -struct mm_struct; + +extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp); /* Free all resources held by a thread. 
*/ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) +void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 5727d31b0ccf..621bc4618c6b 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -87,26 +87,31 @@ struct sh_fpu_hard_struct { /* long status; * software status information */ }; -#if 0 /* Dummy fpu emulator */ struct sh_fpu_soft_struct { - unsigned long long fp_regs[32]; + unsigned long fp_regs[64]; unsigned int fpscr; unsigned char lookahead; unsigned long entry_pc; }; -#endif -union sh_fpu_union { - struct sh_fpu_hard_struct hard; - /* 'hard' itself only produces 32 bit alignment, yet we need - to access it using 64 bit load/store as well. */ +union thread_xstate { + struct sh_fpu_hard_struct hardfpu; + struct sh_fpu_soft_struct softfpu; + /* + * The structure definitions only produce 32 bit alignment, yet we need + * to access them using 64 bit load/store as well. + */ unsigned long long alignment_dummy; }; struct thread_struct { unsigned long sp; unsigned long pc; + + /* Various thread flags, see SH_THREAD_xxx */ + unsigned long flags; + /* This stores the address of the pt_regs built during a context switch, or of the register save area built for a kernel mode exception. It is used for backtracing the stack of a sleeping task @@ -122,7 +127,7 @@ struct thread_struct { /* Hardware debugging registers may come here */ /* floating point info */ - union sh_fpu_union fpu; + union thread_xstate *xstate; }; #define INIT_MMAP \ @@ -137,7 +142,7 @@ struct thread_struct { .trap_no = 0, \ .error_code = 0, \ .address = 0, \ - .fpu = { { { 0, } }, } \ + .flags = 0, \ } /* diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 1dc12cb44a2d..2168fde25611 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -102,13 +102,15 @@ struct pt_dspregs { #define PTRACE_GETDSPREGS 55 /* DSP registers */ #define PTRACE_SETDSPREGS 56 -#define PT_TEXT_END_ADDR 240 -#define PT_TEXT_ADDR 244 /* &(struct user)->start_code */ -#define PT_DATA_ADDR 248 /* &(struct user)->start_data */ +#define PT_TEXT_END_ADDR 240 +#define PT_TEXT_ADDR 244 /* &(struct user)->start_code */ +#define PT_DATA_ADDR 248 /* &(struct user)->start_data */ #define PT_TEXT_LEN 252 #ifdef __KERNEL__ #include <asm/addrspace.h> +#include <asm/page.h> +#include <asm/system.h> #define user_mode(regs) (((regs)->sr & 0x40000000)==0) #define instruction_pointer(regs) ((unsigned long)(regs)->pc) @@ -121,8 +123,12 @@ extern void show_regs(struct pt_regs *); struct task_struct; #define arch_has_single_step() (1) -extern void user_enable_single_step(struct task_struct *); -extern void user_disable_single_step(struct task_struct *); + +struct perf_event; +struct perf_sample_data; + +extern void ptrace_triggered(struct perf_event *bp, int nmi, + struct perf_sample_data *data, struct pt_regs *regs); #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1) @@ -131,10 +137,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); -#ifdef P2SEG - if (pc >= P2SEG && pc < P3SEG) - pc -= 0x20000000; -#endif + if (virt_addr_uncached(pc)) + return CAC_ADDR(pc); return pc; } diff --git a/arch/sh/include/asm/reboot.h 
b/arch/sh/include/asm/reboot.h new file mode 100644 index 000000000000..b3da0c63fc3d --- /dev/null +++ b/arch/sh/include/asm/reboot.h @@ -0,0 +1,21 @@ +#ifndef __ASM_SH_REBOOT_H +#define __ASM_SH_REBOOT_H + +#include <linux/kdebug.h> + +struct pt_regs; + +struct machine_ops { + void (*restart)(char *cmd); + void (*halt)(void); + void (*power_off)(void); + void (*shutdown)(void); + void (*crash_shutdown)(struct pt_regs *); +}; + +extern struct machine_ops machine_ops; + +/* arch/sh/kernel/machine_kexec.c */ +void native_machine_crash_shutdown(struct pt_regs *regs); + +#endif /* __ASM_SH_REBOOT_H */ diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h index 1987f3ea7f1b..06e2251a5e48 100644 --- a/arch/sh/include/asm/rwsem.h +++ b/arch/sh/include/asm/rwsem.h @@ -41,7 +41,7 @@ struct rw_semaphore { #endif #define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ LIST_HEAD_INIT((name).wait_list) \ __RWSEM_DEP_MAP_INIT(name) } diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h index 327cc2e4c97b..e38d1d4c7f6f 100644 --- a/arch/sh/include/asm/scatterlist.h +++ b/arch/sh/include/asm/scatterlist.h @@ -1,7 +1,7 @@ #ifndef __ASM_SH_SCATTERLIST_H #define __ASM_SH_SCATTERLIST_H -#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK +#define ISA_DMA_THRESHOLD phys_addr_mask() #include <asm-generic/scatterlist.h> diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h index ce3743599b27..4758325bb24a 100644 --- a/arch/sh/include/asm/setup.h +++ b/arch/sh/include/asm/setup.h @@ -18,7 +18,6 @@ /* ... */ #define COMMAND_LINE ((char *) (PARAM+0x100)) -int setup_early_printk(char *); void sh_mv_setup(void); #endif /* __KERNEL__ */ diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h index d9c96d7cf6c7..95714c28422b 100644 --- a/arch/sh/include/asm/sh_bios.h +++ b/arch/sh/include/asm/sh_bios.h @@ -1,18 +1,27 @@ #ifndef __ASM_SH_BIOS_H #define __ASM_SH_BIOS_H +#ifdef CONFIG_SH_STANDARD_BIOS + /* * Copyright (C) 2000 Greg Banks, Mitch Davis * C API to interface to the standard LinuxSH BIOS * usually from within the early stages of kernel boot. 
*/ - - extern void sh_bios_console_write(const char *buf, unsigned int len); -extern void sh_bios_char_out(char ch); extern void sh_bios_gdb_detach(void); extern void sh_bios_get_node_addr(unsigned char *node_addr); extern void sh_bios_shutdown(unsigned int how); +extern void sh_bios_vbr_init(void); +extern void sh_bios_vbr_reload(void); + +#else + +static inline void sh_bios_vbr_init(void) { } +static inline void sh_bios_vbr_reload(void) { } + +#endif /* CONFIG_SH_STANDARD_BIOS */ + #endif /* __ASM_SH_BIOS_H */ diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h index acf99700deed..f739061e2ee4 100644 --- a/arch/sh/include/asm/sh_eth.h +++ b/arch/sh/include/asm/sh_eth.h @@ -7,6 +7,7 @@ struct sh_eth_plat_data { int phy; int edmac_endian; + unsigned char mac_addr[6]; unsigned no_ether_link:1; unsigned ether_link_active_low:1; }; diff --git a/arch/sh/include/asm/sh_keysc.h b/arch/sh/include/asm/sh_keysc.h deleted file mode 100644 index 4a65b1e40eab..000000000000 --- a/arch/sh/include/asm/sh_keysc.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __ASM_KEYSC_H__ -#define __ASM_KEYSC_H__ - -#define SH_KEYSC_MAXKEYS 30 - -struct sh_keysc_info { - enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3 } mode; - int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */ - int delay; - int kycr2_delay; - int keycodes[SH_KEYSC_MAXKEYS]; -}; - -#endif /* __ASM_KEYSC_H__ */ diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h new file mode 100644 index 000000000000..f1b1e6944a5f --- /dev/null +++ b/arch/sh/include/asm/siu.h @@ -0,0 +1,26 @@ +/* + * platform header for the SIU ASoC driver + * + * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef ASM_SIU_H +#define ASM_SIU_H + +#include <asm/dmaengine.h> + +struct device; + +struct siu_platform { + struct device *dma_dev; + enum sh_dmae_slave_chan_id dma_slave_tx_a; + enum sh_dmae_slave_chan_id dma_slave_rx_a; + enum sh_dmae_slave_chan_id dma_slave_tx_b; + enum sh_dmae_slave_chan_id dma_slave_rx_b; +}; + +#endif /* ASM_SIU_H */ diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index a28c9f0053fd..bdc0f3b6c56a 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -23,10 +23,10 @@ * Your basic SMP spinlocks, allowing only a single CPU anywhere */ -#define __raw_spin_is_locked(x) ((x)->lock <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -34,14 +34,14 @@ * * We make no fairness assumptions. They have a cost. */ -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_lock \n\t" + "movli.l @%2, %0 ! 
arch_spin_lock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; __asm__ __volatile__ ( - "mov #1, %0 ! __raw_spin_unlock \n\t" + "mov #1, %0 ! arch_spin_unlock \n\t" "mov.l %0, @%1 \n\t" : "=&z" (tmp) : "r" (&lock->lock) @@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" + "movli.l @%2, %0 ! arch_spin_trylock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((x)->lock > 0) +#define arch_read_can_lock(x) ((x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_lock \n\t" + "movli.l @%1, %0 ! arch_read_lock \n\t" "cmp/pl %0 \n\t" "bf 1b \n\t" "add #-1, %0 \n\t" @@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_unlock \n\t" + "movli.l @%1, %0 ! arch_read_unlock \n\t" "add #1, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" @@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_write_lock \n\t" + "movli.l @%1, %0 ! arch_write_lock \n\t" "cmp/hs %2, %0 \n\t" "bf 1b \n\t" "sub %2, %0 \n\t" @@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( - "mov.l %1, @%0 ! __raw_write_unlock \n\t" + "mov.l %1, @%0 ! arch_write_unlock \n\t" : : "r" (&rw->lock), "r" (RW_LOCK_BIAS) : "t", "memory" ); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_read_trylock \n\t" + "movli.l @%2, %0 ! arch_read_trylock \n\t" "mov %0, %1 \n\t" "cmp/pl %0 \n\t" "bf 2f \n\t" @@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_write_trylock \n\t" + "movli.l @%2, %0 ! 
arch_write_trylock \n\t" "mov %0, %1 \n\t" "cmp/hs %3, %0 \n\t" "bf 2f \n\t" @@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (oldval > (RW_LOCK_BIAS - 1)); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SH_SPINLOCK_H */ diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index b4d244e7b60c..9b7560db06ca 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -7,15 +7,15 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 5c8ea28ff7a4..64eb41a063e8 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -2,6 +2,7 @@ #define _ASM_SH_SUSPEND_H #ifndef __ASSEMBLY__ +#include <linux/notifier.h> static inline int arch_prepare_suspend(void) { return 0; } #include <asm/ptrace.h> @@ -19,6 +20,69 @@ void sh_mobile_setup_cpuidle(void); static inline void sh_mobile_setup_cpuidle(void) {} #endif +/* notifier chains for pre/post sleep hooks */ +extern struct atomic_notifier_head sh_mobile_pre_sleep_notifier_list; +extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list; + +/* priority levels for notifiers */ +#define SH_MOBILE_SLEEP_BOARD 0 +#define SH_MOBILE_SLEEP_CPU 1 +#define SH_MOBILE_PRE(x) (x) +#define SH_MOBILE_POST(x) (-(x)) + +/* board code registration function for self-refresh assembly snippets */ +void sh_mobile_register_self_refresh(unsigned long flags, + void *pre_start, void *pre_end, + void *post_start, void *post_end); + +/* register structure for address/data information */ +struct sh_sleep_regs { + unsigned long stbcr; + unsigned long bar; + + /* MMU */ + unsigned long pteh; + unsigned long ptel; + unsigned long ttb; + unsigned long tea; + unsigned long mmucr; + unsigned long ptea; + unsigned long pascr; + unsigned long irmcr; + + /* Cache */ + unsigned long ccr; + unsigned long ramcr; +}; + +/* data area for low-level sleep code */ +struct sh_sleep_data { + /* current sleep mode (SUSP_SH_...) */ + unsigned long mode; + + /* addresses of board specific self-refresh snippets */ + unsigned long sf_pre; + unsigned long sf_post; + + /* address of resume code */ + unsigned long resume; + + /* register state saved and restored by the assembly code */ + unsigned long vbr; + unsigned long spc; + unsigned long sr; + unsigned long sp; + + /* structure for keeping register addresses */ + struct sh_sleep_regs addr; + + /* structure for saving/restoring register state */ + struct sh_sleep_regs data; +}; + +/* a bitmap of supported sleep modes (SUSP_SH..) 
*/ +extern unsigned long sh_mobile_sleep_supported; + #endif /* flags passed to assembly suspend code */ @@ -27,5 +91,7 @@ static inline void sh_mobile_setup_cpuidle(void) {} #define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */ #define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */ #define SUSP_SH_SF (1 << 4) /* Enable self-refresh */ +#define SUSP_SH_MMU (1 << 5) /* Save/restore MMU and cache */ +#define SUSP_SH_REGS (1 << 6) /* Save/restore registers */ #endif /* _ASM_SH_SUSPEND_H */ diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h index 6a381429ee9d..aa7777bdc370 100644 --- a/arch/sh/include/asm/syscall.h +++ b/arch/sh/include/asm/syscall.h @@ -1,6 +1,8 @@ #ifndef __ASM_SH_SYSCALL_H #define __ASM_SH_SYSCALL_H +extern const unsigned long sys_call_table[]; + #ifdef CONFIG_SUPERH32 # include "syscall_32.h" #else diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h index c1e2b8deb837..507725af2e54 100644 --- a/arch/sh/include/asm/syscalls.h +++ b/arch/sh/include/asm/syscalls.h @@ -3,17 +3,12 @@ #ifdef __KERNEL__ -struct old_utsname; - asmlinkage int old_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, int fd, unsigned long off); asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); -asmlinkage int sys_ipc(uint call, int first, int second, - int third, void __user *ptr, long fifth); -asmlinkage int sys_uname(struct old_utsname __user *name); #ifdef CONFIG_SUPERH32 # include "syscalls_32.h" diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index b5c5acdc8c0e..0bd7a17d5e1a 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -10,7 +10,6 @@ #include <linux/compiler.h> #include <linux/linkage.h> #include <asm/types.h> -#include <asm/ptrace.h> #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ @@ -32,7 +31,7 @@ #define mb() __asm__ __volatile__ ("synco": : :"memory") #define rmb() mb() #define wmb() __asm__ __volatile__ ("synco": : :"memory") -#define ctrl_barrier() __icbi(0xa8000000) +#define ctrl_barrier() __icbi(PAGE_OFFSET) #define read_barrier_depends() do { } while(0) #else #define mb() __asm__ __volatile__ ("": : :"memory") @@ -114,6 +113,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, (unsigned long)_n_, sizeof(*(ptr))); \ }) +struct pt_regs; + extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); void free_initmem(void); void free_initrd_mem(unsigned long start, unsigned long end); @@ -137,14 +138,14 @@ extern unsigned int instruction_size(unsigned int insn); #endif extern unsigned long cached_to_uncached; +extern unsigned long uncached_size; extern struct dentry *sh_debugfs_root; void per_cpu_trap_init(void); void default_idle(void); void cpu_idle_wait(void); - -asmlinkage void break_point_trap(void); +void stop_this_cpu(void *); #ifdef CONFIG_SUPERH32 #define BUILD_TRAP_HANDLER(name) \ @@ -171,10 +172,6 @@ BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); BUILD_TRAP_HANDLER(nmi); -#ifdef CONFIG_BUG -extern void handle_BUG(struct pt_regs *); -#endif - #define arch_align_stack(x) (x) struct mem_access { diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index 607d413f6168..51296b36770e 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -2,6 +2,7 @@ #define __ASM_SH_SYSTEM_32_H 
#include <linux/types.h> +#include <asm/mmu.h> #ifdef CONFIG_SH_DSP @@ -144,9 +145,6 @@ do { \ __restore_dsp(prev); \ } while (0) -#define __uses_jump_to_uncached \ - noinline __attribute__ ((__section__ (".uncached.text"))) - /* * Jump to uncached area. * When handling TLB or caches, we need to do it from an uncached area. @@ -216,6 +214,17 @@ static inline reg_size_t register_align(void *val) int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, struct mem_access *ma, int); +static inline void trigger_address_error(void) +{ + if (__in_29bit_mode()) + __asm__ __volatile__ ( + "ldc %0, sr\n\t" + "mov.l @%1, %0" + : + : "r" (0x10000000), "r" (0x80000001) + ); +} + asmlinkage void do_address_error(struct pt_regs *regs, unsigned long writeaccess, unsigned long address); @@ -232,4 +241,33 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs); +static inline void set_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "or %2, %0\n\t" + "and %3, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "r" (0x10000000), "r" (0xffffff0f) + : "memory" + ); +} + +static inline void clear_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %2, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x10000000) + : "memory" + ); +} + #endif /* __ASM_SH_SYSTEM_32_H */ diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h index 8e4a03e7966c..36338646dfc8 100644 --- a/arch/sh/include/asm/system_64.h +++ b/arch/sh/include/asm/system_64.h @@ -12,11 +12,13 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ +#include <cpu/registers.h> #include <asm/processor.h> /* * switch_to() should switch tasks to task nr n, first */ +struct thread_struct; struct task_struct *sh64_switch_to(struct task_struct *prev, struct thread_struct *prev_thread, struct task_struct *next, @@ -32,8 +34,6 @@ do { \ &next->thread); \ } while (0) -#define __uses_jump_to_uncached - #define jump_to_uncached() do { } while (0) #define back_to_cached() do { } while (0) @@ -47,4 +47,36 @@ static inline reg_size_t register_align(void *val) return (unsigned long long)(signed long long)(signed long)val; } +extern void phys_stext(void); + +static inline void trigger_address_error(void) +{ + phys_stext(); +} + +#define SR_BL_LL 0x0000000010000000LL + +static inline void set_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "or %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); + +} + +static inline void clear_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "and %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} + #endif /* __ASM_SH_SYSTEM_64_H */ diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index bdeb9d46d17d..55a36fef6875 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -19,6 +19,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ + __u32 status; /* thread synchronous flags */ __u32 cpu; int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space */ @@ -50,6 +51,7 @@ struct thread_info { .task = &tsk, \ .exec_domain = &default_exec_domain, \ .flags = 0, \ + .status = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ @@ -91,14 +93,16 @@ static inline struct thread_info *current_thread_info(void) #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) -#else /* THREAD_SHIFT < PAGE_SHIFT */ - -#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR +#endif extern struct thread_info *alloc_thread_info(struct task_struct *tsk); extern void free_thread_info(struct thread_info *ti); +extern void arch_task_cache_init(void); +#define arch_task_cache_init arch_task_cache_init +extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); +extern void init_thread_xstate(void); -#endif /* THREAD_SHIFT < PAGE_SHIFT */ +#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR #endif /* __ASSEMBLY__ */ @@ -111,13 +115,11 @@ extern void free_thread_info(struct thread_info *ti); #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */ #define TIF_SINGLESTEP 4 /* singlestepping active */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ -#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 #define TIF_FREEZE 19 /* Freezing for suspend */ @@ -125,13 +127,11 @@ extern void 
free_thread_info(struct thread_info *ti); #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) -#define _TIF_USEDFPU (1 << TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1 << TIF_FREEZE) @@ -149,13 +149,33 @@ extern void free_thread_info(struct thread_info *ti); /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ - _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ - _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT) + _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ + _TIF_SYSCALL_TRACEPOINT) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) +/* + * Thread-synchronous status. + * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */ +#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */ + +#ifndef __ASSEMBLY__ +#define HAVE_SET_RESTORE_SIGMASK 1 +static inline void set_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + ti->status |= TS_RESTORE_SIGMASK; + set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); +} +#endif /* !__ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* __ASM_SH_THREAD_INFO_H */ diff --git a/arch/sh/include/asm/timex.h b/arch/sh/include/asm/timex.h index b556d49e5f2b..18bf06d9c764 100644 --- a/arch/sh/include/asm/timex.h +++ b/arch/sh/include/asm/timex.h @@ -6,7 +6,17 @@ #ifndef __ASM_SH_TIMEX_H #define __ASM_SH_TIMEX_H +/* + * Only parts using the legacy CPG code for their clock framework + * implementation need to define their own Pclk value. If provided, this + * can be used for accurately setting CLOCK_TICK_RATE, otherwise we + * simply fall back on the i8253 PIT value. + */ +#ifdef CONFIG_SH_PCLK_FREQ #define CLOCK_TICK_RATE (CONFIG_SH_PCLK_FREQ / 4) /* Underlying HZ */ +#else +#define CLOCK_TICK_RATE 1193180 +#endif #include <asm-generic/timex.h> diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index da8fe7ab8728..75abb38dffd5 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -11,6 +11,7 @@ #ifdef CONFIG_MMU #include <asm/pgalloc.h> #include <asm/tlbflush.h> +#include <asm/mmu_context.h> /* * TLB handling. 
This allows us to remove pages from the page @@ -97,6 +98,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) #define tlb_migrate_finish(mm) do { } while (0) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64) +extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t); +extern void tlb_unwire_entry(void); +#else +static inline void tlb_wire_entry(struct vm_area_struct *vma , + unsigned long addr, pte_t pte) +{ + BUG(); +} + +static inline void tlb_unwire_entry(void) +{ + BUG(); +} +#endif + #else /* CONFIG_MMU */ #define tlb_start_vma(tlb, vma) do { } while (0) diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 65e7bd2f2240..88e734069fa6 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h @@ -35,11 +35,19 @@ #define pcibus_to_node(bus) ((void)(bus), -1) #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ - CPU_MASK_ALL_PTR : \ + cpu_all_mask : \ cpumask_of_node(pcibus_to_node(bus))) #endif +#define mc_capable() (1) + +const struct cpumask *cpu_coregroup_mask(unsigned int cpu); + +extern cpumask_t cpu_core_map[NR_CPUS]; + +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) + #include <asm-generic/topology.h> #endif /* _ASM_SH_TOPOLOGY_H */ diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h deleted file mode 100644 index 4ca4b7717371..000000000000 --- a/arch/sh/include/asm/ubc.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * include/asm-sh/ubc.h - * - * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2002, 2003 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#ifndef __ASM_SH_UBC_H -#define __ASM_SH_UBC_H -#ifdef __KERNEL__ - -#include <cpu/ubc.h> - -/* User Break Controller */ -#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) -#define UBC_TYPE_SH7729 (current_cpu_data.type == CPU_SH7729) -#else -#define UBC_TYPE_SH7729 0 -#endif - -#define BAMR_ASID (1 << 2) -#define BAMR_NONE 0 -#define BAMR_10 0x1 -#define BAMR_12 0x2 -#define BAMR_ALL 0x3 -#define BAMR_16 0x8 -#define BAMR_20 0x9 - -#define BBR_INST (1 << 4) -#define BBR_DATA (2 << 4) -#define BBR_READ (1 << 2) -#define BBR_WRITE (2 << 2) -#define BBR_BYTE 0x1 -#define BBR_HALF 0x2 -#define BBR_LONG 0x3 -#define BBR_QUAD (1 << 6) /* SH7750 */ -#define BBR_CPU (1 << 6) /* SH7709A,SH7729 */ -#define BBR_DMA (2 << 6) /* SH7709A,SH7729 */ - -#define BRCR_CMFA (1 << 15) -#define BRCR_CMFB (1 << 14) - -#if defined CONFIG_CPU_SH2A -#define BRCR_CMFCA (1 << 15) -#define BRCR_CMFCB (1 << 14) -#define BRCR_CMFDA (1 << 13) -#define BRCR_CMFDB (1 << 12) -#define BRCR_PCBB (1 << 6) /* 1: after execution */ -#define BRCR_PCBA (1 << 5) /* 1: after execution */ -#define BRCR_PCTE 0 -#else -#define BRCR_PCTE (1 << 11) -#define BRCR_PCBA (1 << 10) /* 1: after execution */ -#define BRCR_DBEB (1 << 7) -#define BRCR_PCBB (1 << 6) -#define BRCR_SEQ (1 << 3) -#define BRCR_UBDE (1 << 0) -#endif - -#ifndef __ASSEMBLY__ -/* arch/sh/kernel/cpu/ubc.S */ -extern void ubc_sleep(void); - -#ifdef CONFIG_UBC_WAKEUP -extern void ubc_wakeup(void); -#else -#define ubc_wakeup() do { } while (0) -#endif -#endif - -#endif /* __KERNEL__ */ -#endif /* __ASM_SH_UBC_H */ diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h new file mode 100644 index 000000000000..e3419f96626a --- /dev/null +++ b/arch/sh/include/asm/uncached.h @@ -0,0 +1,18 @@ +#ifndef 
__ASM_SH_UNCACHED_H +#define __ASM_SH_UNCACHED_H + +#include <linux/bug.h> + +#ifdef CONFIG_UNCACHED_MAPPING +extern unsigned long uncached_start, uncached_end; + +extern int virt_addr_uncached(unsigned long kaddr); +extern void uncached_init(void); +extern void uncached_resize(unsigned long size); +#else +#define virt_addr_uncached(kaddr) (0) +#define uncached_init() do { } while (0) +#define uncached_resize(size) BUG() +#endif + +#endif /* __ASM_SH_UNCACHED_H */ diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h index f3fd1b9eb6b1..0e7f0fc8f086 100644 --- a/arch/sh/include/asm/unistd_32.h +++ b/arch/sh/include/asm/unistd_32.h @@ -350,12 +350,15 @@ #ifdef __KERNEL__ +#define __IGNORE_recvmmsg + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SGETMASK #define __ARCH_WANT_SYS_SIGNAL @@ -368,6 +371,7 @@ #define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLD_UNAME #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h index 343ce8f073ea..0580c33a1e04 100644 --- a/arch/sh/include/asm/unistd_64.h +++ b/arch/sh/include/asm/unistd_64.h @@ -385,10 +385,12 @@ #define __NR_pwritev 362 #define __NR_rt_tgsigqueueinfo 363 #define __NR_perf_event_open 364 +#define __NR_recvmmsg 365 +#define __NR_accept4 366 #ifdef __KERNEL__ -#define NR_syscalls 365 +#define NR_syscalls 367 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR @@ -396,6 +398,7 @@ #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SGETMASK #define __ARCH_WANT_SYS_SIGNAL @@ -408,6 +411,7 @@ #define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLD_UNAME #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h index 244ec4ad9a79..d58ad493b3a6 100644 --- a/arch/sh/include/asm/vmlinux.lds.h +++ b/arch/sh/include/asm/vmlinux.lds.h @@ -14,4 +14,12 @@ #define DWARF_EH_FRAME #endif +#ifdef CONFIG_SUPERH64 +#define EXTRA_TEXT \ + *(.text64) \ + *(.text..SHmedia32) +#else +#define EXTRA_TEXT +#endif + #endif /* __ASM_SH_VMLINUX_LDS_H */ diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h index 2fe7cee9e43a..85a7aca7fb8f 100644 --- a/arch/sh/include/asm/watchdog.h +++ b/arch/sh/include/asm/watchdog.h @@ -2,6 +2,8 @@ * include/asm-sh/watchdog.h * * Copyright (C) 2002, 2003 Paul Mundt + * Copyright (C) 2009 Siemens AG + * Copyright (C) 2009 Valentin Sitdikov * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -61,13 +63,68 @@ #define WTCSR_CKS_2048 0x06 #define WTCSR_CKS_4096 0x07 +#if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780) +/** + * sh_wdt_read_cnt - Read from Counter + * Reads back the WTCNT value. 
+ */ +static inline __u32 sh_wdt_read_cnt(void) +{ + return __raw_readl(WTCNT_R); +} + +/** + * sh_wdt_write_cnt - Write to Counter + * @val: Value to write + * + * Writes the given value @val to the lower byte of the timer counter. + * The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_cnt(__u32 val) +{ + __raw_writel((WTCNT_HIGH << 24) | (__u32)val, WTCNT); +} + +/** + * sh_wdt_write_bst - Write to Counter + * @val: Value to write + * + * Writes the given value @val to the lower byte of the timer counter. + * The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_bst(__u32 val) +{ + __raw_writel((WTBST_HIGH << 24) | (__u32)val, WTBST); +} +/** + * sh_wdt_read_csr - Read from Control/Status Register + * + * Reads back the WTCSR value. + */ +static inline __u32 sh_wdt_read_csr(void) +{ + return __raw_readl(WTCSR_R); +} + +/** + * sh_wdt_write_csr - Write to Control/Status Register + * @val: Value to write + * + * Writes the given value @val to the lower byte of the control/status + * register. The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_csr(__u32 val) +{ + __raw_writel((WTCSR_HIGH << 24) | (__u32)val, WTCSR); +} +#else /** * sh_wdt_read_cnt - Read from Counter * Reads back the WTCNT value. */ static inline __u8 sh_wdt_read_cnt(void) { - return ctrl_inb(WTCNT_R); + return __raw_readb(WTCNT_R); } /** @@ -79,7 +136,7 @@ static inline __u8 sh_wdt_read_cnt(void) */ static inline void sh_wdt_write_cnt(__u8 val) { - ctrl_outw((WTCNT_HIGH << 8) | (__u16)val, WTCNT); + __raw_writew((WTCNT_HIGH << 8) | (__u16)val, WTCNT); } /** @@ -89,7 +146,7 @@ static inline void sh_wdt_write_cnt(__u8 val) */ static inline __u8 sh_wdt_read_csr(void) { - return ctrl_inb(WTCSR_R); + return __raw_readb(WTCSR_R); } /** @@ -101,8 +158,8 @@ static inline __u8 sh_wdt_read_csr(void) */ static inline void sh_wdt_write_csr(__u8 val) { - ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR); + __raw_writew((WTCSR_HIGH << 8) | (__u16)val, WTCSR); } - +#endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */ #endif /* __KERNEL__ */ #endif /* __ASM_SH_WATCHDOG_H */ |
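A minimal illustrative sketch of the recurring theme in this series of header changes (the runtime phys_addr_mask() selection in pgtable.h and the UNCAC_ADDR()/CAC_ADDR() translation added to page.h and used by profile_pc()). This is not kernel code from the patch: PAGE_OFFSET, uncached_start and the 29-bit flag below are placeholder values chosen only for demonstration, standing in for what __in_29bit_mode() and uncached_init() would provide on a real board.

#include <stdio.h>

/* Placeholder values for illustration only; the real ones come from the
 * kernel configuration and the PMB/uncached mapping setup code. */
#define PAGE_OFFSET      0x80000000UL
#define PHYS_ADDR_MASK29 0x1fffffffUL
#define PHYS_ADDR_MASK32 0xffffffffUL

static int in_29bit_mode = 1;                        /* stand-in for __in_29bit_mode() */
static unsigned long uncached_start = 0xb0000000UL;  /* stand-in, set up by uncached_init() */

/* Mirrors the new phys_addr_mask() helper: the mask is picked at run time
 * instead of being hard-coded at compile time. */
static unsigned long phys_addr_mask(void)
{
	return in_29bit_mode ? PHYS_ADDR_MASK29 : PHYS_ADDR_MASK32;
}

/* Mirrors UNCAC_ADDR()/CAC_ADDR() from the reworked page.h. */
static unsigned long uncac_addr(unsigned long addr)
{
	return addr - PAGE_OFFSET + uncached_start;
}

static unsigned long cac_addr(unsigned long addr)
{
	return addr - uncached_start + PAGE_OFFSET;
}

int main(void)
{
	unsigned long kaddr = 0x8c001000UL;   /* arbitrary cached kernel address */
	unsigned long ukaddr = uncac_addr(kaddr);

	printf("phys mask:       0x%08lx\n", phys_addr_mask());
	printf("cached address:  0x%08lx\n", kaddr);
	printf("uncached alias:  0x%08lx\n", ukaddr);
	printf("back to cached:  0x%08lx\n", cac_addr(ukaddr));
	return 0;
}

The point of both changes is visible here: with PMB the same kernel may run in 29-bit or 32-bit physical mode, so the physical address mask has to be a runtime decision, and code executing from the uncached alias (as profile_pc() now checks via virt_addr_uncached()) can be mapped back to its cached address with simple offset arithmetic rather than the old fixed P2SEG subtraction.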