Diffstat (limited to 'arch/arm/include')
46 files changed, 1191 insertions, 396 deletions
diff --git a/arch/arm/include/asm/asm-offsets.h b/arch/arm/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/arm/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index d0daeab2234e..e8ddec2cb158 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#ifndef CONFIG_GENERIC_ATOMIC64 +typedef struct { + u64 __aligned(8) counter; +} atomic64_t; + +#define ATOMIC64_INIT(i) { (i) } + +static inline u64 atomic64_read(atomic64_t *v) +{ + u64 result; + + __asm__ __volatile__("@ atomic64_read\n" +" ldrexd %0, %H0, [%1]" + : "=&r" (result) + : "r" (&v->counter) + ); + + return result; +} + +static inline void atomic64_set(atomic64_t *v, u64 i) +{ + u64 tmp; + + __asm__ __volatile__("@ atomic64_set\n" +"1: ldrexd %0, %H0, [%1]\n" +" strexd %0, %2, %H2, [%1]\n" +" teq %0, #0\n" +" bne 1b" + : "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline void atomic64_add(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + __asm__ __volatile__("@ atomic64_add\n" +"1: ldrexd %0, %H0, [%2]\n" +" adds %0, %0, %3\n" +" adc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline u64 atomic64_add_return(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_add_return\n" +"1: ldrexd %0, %H0, [%2]\n" +" adds %0, %0, %3\n" +" adc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic64_sub(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + __asm__ __volatile__("@ atomic64_sub\n" +"1: ldrexd %0, %H0, [%2]\n" +" subs %0, %0, %3\n" +" sbc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_sub_return\n" +"1: ldrexd %0, %H0, [%2]\n" +" subs %0, %0, %3\n" +" sbc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) +{ + u64 oldval; + unsigned long res; + + smp_mb(); + + do { + __asm__ __volatile__("@ atomic64_cmpxchg\n" + "ldrexd %1, %H1, [%2]\n" + "mov %0, #0\n" + "teq %1, %3\n" + "teqeq %H1, %H3\n" + "strexdeq %0, %4, %H4, [%2]" + : "=&r" (res), "=&r" (oldval) + : "r" (&ptr->counter), "r" (old), "r" (new) + : "cc"); + } while (res); + + smp_mb(); + + return oldval; +} + +static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_xchg\n" +"1: ldrexd %0, %H0, [%2]\n" +" strexd %1, %3, %H3, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&ptr->counter), "r" (new) + : "cc"); + + smp_mb(); + + return result; +} + +static inline u64 
atomic64_dec_if_positive(atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_dec_if_positive\n" +"1: ldrexd %0, %H0, [%2]\n" +" subs %0, %0, #1\n" +" sbc %H0, %H0, #0\n" +" teq %H0, #0\n" +" bmi 2f\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b\n" +"2:" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter) + : "cc"); + + smp_mb(); + + return result; +} + +static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) +{ + u64 val; + unsigned long tmp; + int ret = 1; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_add_unless\n" +"1: ldrexd %0, %H0, [%3]\n" +" teq %0, %4\n" +" teqeq %H0, %H4\n" +" moveq %1, #0\n" +" beq 2f\n" +" adds %0, %0, %5\n" +" adc %H0, %H0, %H5\n" +" strexd %2, %0, %H0, [%3]\n" +" teq %2, #0\n" +" bne 1b\n" +"2:" + : "=&r" (val), "=&r" (ret), "=&r" (tmp) + : "r" (&v->counter), "r" (u), "r" (a) + : "cc"); + + if (ret) + smp_mb(); + + return ret; +} + +#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) +#define atomic64_inc(v) atomic64_add(1LL, (v)) +#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) +#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) +#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) +#define atomic64_dec(v) atomic64_sub(1LL, (v)) +#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) +#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) + +#else /* !CONFIG_GENERIC_ATOMIC64 */ +#include <asm-generic/atomic64.h> +#endif #include <asm-generic/atomic-long.h> #endif #endif diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 63a481fbbed4..338ff19ae447 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -84,7 +84,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) *p = res | mask; raw_local_irq_restore(flags); - return res & mask; + return (res & mask) != 0; } static inline int @@ -101,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) *p = res & ~mask; raw_local_irq_restore(flags); - return res & mask; + return (res & mask) != 0; } static inline int @@ -118,7 +118,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) *p = res ^ mask; raw_local_irq_restore(flags); - return res & mask; + return (res & mask) != 0; } #include <asm-generic/bitops/non-atomic.h> diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index fd03fb63a332..72da7e045c6b 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -42,7 +42,8 @@ #endif #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ - defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) + defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ + defined(CONFIG_CPU_ARM1026) # define MULTI_CACHE 1 #endif @@ -154,16 +155,16 @@ * Please note that the implementation of these, and the required * effects are cache-type (VIVT/VIPT/PIPT) specific. * - * flush_cache_kern_all() + * flush_kern_all() * * Unconditionally clean and invalidate the entire cache. * - * flush_cache_user_mm(mm) + * flush_user_all() * * Clean and invalidate all user space cache entries * before a change of page tables. 
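
[Note on the asm/atomic.h change above] The ldrexd/strexd-based atomic64_t implementation mirrors the existing 32-bit atomic API; configurations whose CPUs lack ldrexd/strexd instead pull in the asm-generic implementation via CONFIG_GENERIC_ATOMIC64, as the #else branch shows. A minimal usage sketch follows; the byte counter and helper names are hypothetical, not part of this patch:

	#include <linux/types.h>
	#include <asm/atomic.h>

	static atomic64_t bytes_seen = ATOMIC64_INIT(0);

	static void account_bytes(u64 len)
	{
		atomic64_add(len, &bytes_seen);    /* one ldrexd/strexd retry loop, no lock */
	}

	static u64 total_bytes(void)
	{
		return atomic64_read(&bytes_seen); /* ldrexd makes the 64-bit read single-copy atomic */
	}
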
* - * flush_cache_user_range(start, end, flags) + * flush_user_range(start, end, flags) * * Clean and invalidate a range of cache entries in the * specified address space before a change of page tables. @@ -179,23 +180,22 @@ * - start - virtual start address * - end - virtual end address * - * DMA Cache Coherency - * =================== - * - * dma_inv_range(start, end) + * coherent_user_range(start, end) * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. * - start - virtual start address * - end - virtual end address * - * dma_clean_range(start, end) + * flush_kern_dcache_area(kaddr, size) * - * Clean (write back) the specified virtual address range. - * - start - virtual start address - * - end - virtual end address + * Ensure that the data held in page is written back. + * - kaddr - page address + * - size - region size + * + * DMA Cache Coherency + * =================== * * dma_flush_range(start, end) * @@ -211,10 +211,11 @@ struct cpu_cache_fns { void (*coherent_kern_range)(unsigned long, unsigned long); void (*coherent_user_range)(unsigned long, unsigned long); - void (*flush_kern_dcache_page)(void *); + void (*flush_kern_dcache_area)(void *, size_t); + + void (*dma_map_area)(const void *, size_t, int); + void (*dma_unmap_area)(const void *, size_t, int); - void (*dma_inv_range)(const void *, const void *); - void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); }; @@ -236,7 +237,7 @@ extern struct cpu_cache_fns cpu_cache; #define __cpuc_flush_user_range cpu_cache.flush_user_range #define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range #define __cpuc_coherent_user_range cpu_cache.coherent_user_range -#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page +#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area /* * These are private to the dma-mapping API. Do not use directly. @@ -244,8 +245,8 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_inv_range cpu_cache.dma_inv_range -#define dmac_clean_range cpu_cache.dma_clean_range +#define dmac_map_area cpu_cache.dma_map_area +#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -255,14 +256,14 @@ extern struct cpu_cache_fns cpu_cache; #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) #define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) -#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page) +#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) extern void __cpuc_flush_kern_all(void); extern void __cpuc_flush_user_all(void); extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); extern void __cpuc_coherent_user_range(unsigned long, unsigned long); -extern void __cpuc_flush_dcache_page(void *); +extern void __cpuc_flush_dcache_area(void *, size_t); /* * These are private to the dma-mapping API. Do not use directly. 
@@ -270,12 +271,12 @@ extern void __cpuc_flush_dcache_page(void *); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_inv_range __glue(_CACHE,_dma_inv_range) -#define dmac_clean_range __glue(_CACHE,_dma_clean_range) +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) -extern void dmac_inv_range(const void *, const void *); -extern void dmac_clean_range(const void *, const void *); +extern void dmac_map_area(const void *, size_t, int); +extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); #endif @@ -316,12 +317,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end) * processes address space. Really, we want to allow our "user * space" model to handle this. */ -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - memcpy(dst, src, len); \ - flush_ptrace_access(vma, page, vaddr, dst, len, 1);\ - } while (0) - +extern void copy_to_user_page(struct vm_area_struct *, struct page *, + unsigned long, void *, const void *, unsigned long); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ @@ -331,15 +328,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end) * Convert calls to our calling convention. */ #define flush_cache_all() __cpuc_flush_kern_all() -#ifndef CONFIG_CPU_CACHE_VIPT -static inline void flush_cache_mm(struct mm_struct *mm) + +static inline void vivt_flush_cache_mm(struct mm_struct *mm) { if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) __cpuc_flush_user_all(); } static inline void -flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), @@ -347,7 +344,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long } static inline void -flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) +vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { unsigned long addr = user_addr & PAGE_MASK; @@ -355,23 +352,17 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l } } -static inline void -flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) -{ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - unsigned long addr = (unsigned long)kaddr; - __cpuc_coherent_kern_range(addr, addr + len); - } -} +#ifndef CONFIG_CPU_CACHE_VIPT +#define flush_cache_mm(mm) \ + vivt_flush_cache_mm(mm) +#define flush_cache_range(vma,start,end) \ + vivt_flush_cache_range(vma,start,end) +#define flush_cache_page(vma,addr,pfn) \ + vivt_flush_cache_page(vma,addr,pfn) #else extern void flush_cache_mm(struct mm_struct *mm); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); -extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - 
unsigned long len, int write); #endif #define flush_cache_dup_mm(mm) flush_cache_mm(mm) @@ -408,15 +399,29 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, * about to change to user space. This is the same method as used on SPARC64. * See update_mmu_cache for the user space part. */ +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); -extern void __flush_dcache_page(struct address_space *mapping, struct page *page); - static inline void __flush_icache_all(void) { +#ifdef CONFIG_ARM_ERRATA_411920 + extern void v6_icache_inval_all(void); + v6_icache_inval_all(); +#else asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" : : "r" (0)); +#endif +} +static inline void flush_kernel_vmap_range(void *addr, int size) +{ + if ((cache_is_vivt() || cache_is_vipt_aliasing())) + __cpuc_flush_dcache_area(addr, (size_t)size); +} +static inline void invalidate_kernel_vmap_range(void *addr, int size) +{ + if ((cache_is_vivt() || cache_is_vipt_aliasing())) + __cpuc_flush_dcache_area(addr, (size_t)size); } #define ARCH_HAS_FLUSH_ANON_PAGE @@ -434,7 +439,7 @@ static inline void flush_kernel_dcache_page(struct page *page) { /* highmem pages are always flushed upon kunmap already */ if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page)) - __cpuc_flush_dcache_page(page_address(page)); + __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); } #define flush_dcache_mmap_lock(mapping) \ @@ -451,13 +456,6 @@ static inline void flush_kernel_dcache_page(struct page *page) */ #define flush_icache_page(vma,page) do { } while (0) -static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt, - unsigned offset, size_t size) -{ - const void *start = (void __force *)virt + offset; - dmac_inv_range(start, start + size); -} - /* * flush_cache_vmap() is used when creating mappings (eg, via vmap, * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h index b6ec7c627b39..7a0690da5e63 100644 --- a/arch/arm/include/asm/clkdev.h +++ b/arch/arm/include/asm/clkdev.h @@ -27,4 +27,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); +void clkdev_add_table(struct clk_lookup *, size_t); +int clk_add_alias(const char *, const char *, char *, struct device *); + #endif diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h index 634b2d7c612a..793968173bef 100644 --- a/arch/arm/include/asm/cpu.h +++ b/arch/arm/include/asm/cpu.h @@ -11,6 +11,7 @@ #define __ASM_ARM_CPU_H #include <linux/percpu.h> +#include <linux/cpu.h> struct cpuinfo_arm { struct cpu cpu; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index ff46dfa68a97..69ce0727edb5 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -15,20 +15,15 @@ * must not be used by drivers. 
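
[Note on the cacheflush.h change above] Besides renaming flush_kern_dcache_page() to flush_kern_dcache_area(), the header gains flush_kernel_vmap_range()/invalidate_kernel_vmap_range(), which matter when I/O is done to memory that the kernel has aliased with vmap()/vmalloc() on a VIVT or aliasing-VIPT cache. A hedged sketch of the intended call pattern; buf/len and do_block_io() are placeholders:

	/* buf/len describe a vmap()/vmalloc() area the caller owns. */
	static void io_to_vmapped_buffer(void *buf, int len)
	{
		flush_kernel_vmap_range(buf, len);      /* write back dirty lines of the vmap alias */
		do_block_io(buf, len);                  /* placeholder: device accesses the pages via the linear map */
		invalidate_kernel_vmap_range(buf, len); /* drop now-stale vmap-alias lines before the CPU reads */
	}
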
*/ #ifndef __arch_page_to_dma - -#if !defined(CONFIG_HIGHMEM) static inline dma_addr_t page_to_dma(struct device *dev, struct page *page) { - return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page)); + return (dma_addr_t)__pfn_to_bus(page_to_pfn(page)); } -#elif defined(__pfn_to_bus) -static inline dma_addr_t page_to_dma(struct device *dev, struct page *page) + +static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr) { - return (dma_addr_t)__pfn_to_bus(page_to_pfn(page)); + return pfn_to_page(__bus_to_pfn(addr)); } -#else -#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM" -#endif static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) { @@ -45,6 +40,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page) return __arch_page_to_dma(dev, page); } +static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr) +{ + return __arch_dma_to_page(dev, addr); +} + static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) { return __arch_dma_to_virt(dev, addr); @@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) #endif /* - * DMA-consistent mapping functions. These allocate/free a region of - * uncached, unwrite-buffered mapped memory space for use with DMA - * devices. This is the "generic" version. The PCI specific version - * is in pci.h + * The DMA API is built upon the notion of "buffer ownership". A buffer + * is either exclusively owned by the CPU (and therefore may be accessed + * by it) or exclusively owned by the DMA device. These helper functions + * represent the transitions between these two ownership states. + * + * Note, however, that on later ARMs, this notion does not work due to + * speculative prefetches. We model our approach on the assumption that + * the CPU does do speculative prefetches, which means we clean caches + * before transfers and delay cache invalidation until transfer completion. * - * Note: Drivers should NOT use this function directly, as it will break - * platforms with CONFIG_DMABOUNCE. - * Use the driver DMA support - see dma-mapping.h (dma_sync_*) + * Private support functions: these are not part of the API and are + * liable to change. Drivers must not use these. 
*/ -extern void dma_cache_maint(const void *kaddr, size_t size, int rw); -extern void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int rw); +static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + extern void ___dma_single_cpu_to_dev(const void *, size_t, + enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_single_cpu_to_dev(kaddr, size, dir); +} + +static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + extern void ___dma_single_dev_to_cpu(const void *, size_t, + enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_single_dev_to_cpu(kaddr, size, dir); +} + +static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, + size_t, enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_page_cpu_to_dev(page, off, size, dir); +} + +static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, + size_t, enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_page_dev_to_cpu(page, off, size, dir); +} /* * Return whether the given device DMA address mask can be supported @@ -88,6 +128,14 @@ static inline int dma_supported(struct device *dev, u64 mask) static inline int dma_set_mask(struct device *dev, u64 dma_mask) { +#ifdef CONFIG_DMABOUNCE + if (dev->archdata.dmabounce) { + if (dma_mask >= ISA_DMA_THRESHOLD) + return 0; + else + return -EIO; + } +#endif if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; @@ -257,9 +305,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); */ extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction); +extern void dma_unmap_single(struct device *, dma_addr_t, size_t, + enum dma_data_direction); extern dma_addr_t dma_map_page(struct device *, struct page *, unsigned long, size_t, enum dma_data_direction); -extern void dma_unmap_single(struct device *, dma_addr_t, size_t, +extern void dma_unmap_page(struct device *, dma_addr_t, size_t, enum dma_data_direction); /* @@ -302,8 +352,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint(cpu_addr, size, dir); + __dma_single_cpu_to_dev(cpu_addr, size, dir); return virt_to_dma(dev, cpu_addr); } @@ -327,8 +376,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint_page(page, offset, size, dir); + __dma_page_cpu_to_dev(page, offset, size, dir); return page_to_dma(dev, page) + offset; } @@ -350,9 +398,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); } -#endif /* CONFIG_DMABOUNCE */ /** * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() @@ -371,8 +418,10 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - 
dma_unmap_single(dev, handle, size, dir); + __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK, + size, dir); } +#endif /* CONFIG_DMABOUNCE */ /** * dma_sync_single_range_for_cpu @@ -398,7 +447,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, { BUG_ON(!valid_dma_direction(dir)); - dmabounce_sync_for_cpu(dev, handle, offset, size, dir); + if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) + return; + + __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, @@ -410,8 +462,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) return; - if (!arch_is_coherent()) - dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); + __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_for_cpu(struct device *dev, diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 7edf3536df24..ca51143f97f1 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -138,12 +138,12 @@ extern int get_dma_residue(unsigned int chan); #define NO_DMA 255 #endif +#endif /* CONFIG_ISA_DMA_API */ + #ifdef CONFIG_PCI extern int isa_dma_bridge_buggy; #else #define isa_dma_bridge_buggy (0) #endif -#endif /* CONFIG_ISA_DMA_API */ - #endif /* __ASM_ARM_DMA_H */ diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index c3b911ee9151..bff056489cc1 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -98,7 +98,10 @@ extern int elf_check_arch(const struct elf32_hdr *); extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk) -#define USE_ELF_CORE_DUMP +struct task_struct; +int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); +#define ELF_CORE_COPY_TASK_REGS dump_task_regs + #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S new file mode 100644 index 000000000000..3ceb85e43850 --- /dev/null +++ b/arch/arm/include/asm/entry-macro-vic2.S @@ -0,0 +1,57 @@ +/* arch/arm/include/asm/entry-macro-vic2.S + * + * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * Low-level IRQ helper macros for a device with two VICs + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. +*/ + +/* This should be included from <mach/entry-macro.S> with the necessary + * defines for virtual addresses and IRQ bases for the two vics. + * + * The code needs the following defined: + * IRQ_VIC0_BASE IRQ number of VIC0's first IRQ + * IRQ_VIC1_BASE IRQ number of VIC1's first IRQ + * VA_VIC0 Virtual address of VIC0 + * VA_VIC1 Virtual address of VIC1 + * + * Note, code assumes VIC0's virtual address is an ARM immediate constant + * away from VIC1. 
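
[Note on the dma-mapping.h change above] The rework recasts cache maintenance as explicit CPU-to-device and device-to-CPU ownership transitions: caches are cleaned when the buffer is handed to the device and, as the new comment explains, invalidation is deferred to unmap time to cope with speculative prefetching. Driver-visible usage is unchanged; a sketch with placeholder dev/buf/len:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE); /* CPU hands the buffer to the device */
	/* ... program the device with 'handle' and wait for the transfer to complete ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);     /* device hands it back; with this patch,
	                                                            stale lines are invalidated here rather
	                                                            than unmap being a no-op */
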
+*/ + +#include <asm/hardware/vic.h> + + .macro disable_fiq + .endm + + .macro get_irqnr_preamble, base, tmp + ldr \base, =VA_VIC0 + .endm + + .macro arch_ret_to_user, tmp1, tmp2 + .endm + + .macro get_irqnr_and_base, irqnr, irqstat, base, tmp + + @ check the vic0 + mov \irqnr, #IRQ_VIC0_BASE + 31 + ldr \irqstat, [ \base, # VIC_IRQ_STATUS ] + teq \irqstat, #0 + + @ otherwise try vic1 + addeq \tmp, \base, #(VA_VIC1 - VA_VIC0) + addeq \irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE) + ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ] + teqeq \irqstat, #0 + + clzne \irqstat, \irqstat + subne \irqnr, \irqnr, \irqstat + .endm diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h index a0e39d5d00c9..234a3fc1c78e 100644 --- a/arch/arm/include/asm/glue.h +++ b/arch/arm/include/asm/glue.h @@ -120,25 +120,39 @@ #endif /* - * Prefetch abort handler. If the CPU has an IFAR use that, otherwise - * use the address of the aborted instruction + * Prefetch Abort Model + * ================ + * + * We have the following to choose from: + * legacy - no IFSR, no IFAR + * v6 - ARMv6: IFSR, no IFAR + * v7 - ARMv7: IFSR and IFAR */ + #undef CPU_PABORT_HANDLER #undef MULTI_PABORT -#ifdef CONFIG_CPU_PABRT_IFAR +#ifdef CONFIG_CPU_PABRT_LEGACY +# ifdef CPU_PABORT_HANDLER +# define MULTI_PABORT 1 +# else +# define CPU_PABORT_HANDLER legacy_pabort +# endif +#endif + +#ifdef CONFIG_CPU_PABRT_V6 # ifdef CPU_PABORT_HANDLER # define MULTI_PABORT 1 # else -# define CPU_PABORT_HANDLER(reg, insn) mrc p15, 0, reg, cr6, cr0, 2 +# define CPU_PABORT_HANDLER v6_pabort # endif #endif -#ifdef CONFIG_CPU_PABRT_NOIFAR +#ifdef CONFIG_CPU_PABRT_V7 # ifdef CPU_PABORT_HANDLER # define MULTI_PABORT 1 # else -# define CPU_PABORT_HANDLER(reg, insn) mov reg, insn +# define CPU_PABORT_HANDLER v7_pabort # endif #endif diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h new file mode 100644 index 000000000000..538f17ca905b --- /dev/null +++ b/arch/arm/include/asm/hardware/cache-tauros2.h @@ -0,0 +1,11 @@ +/* + * arch/arm/include/asm/hardware/cache-tauros2.h + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +extern void __init tauros2_init(void); diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h new file mode 100644 index 000000000000..f82b25d4f73e --- /dev/null +++ b/arch/arm/include/asm/hardware/coresight.h @@ -0,0 +1,165 @@ +/* + * linux/arch/arm/include/asm/hardware/coresight.h + * + * CoreSight components' registers + * + * Copyright (C) 2009 Nokia Corporation. + * Alexander Shishkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_HARDWARE_CORESIGHT_H +#define __ASM_HARDWARE_CORESIGHT_H + +#define TRACER_ACCESSED_BIT 0 +#define TRACER_RUNNING_BIT 1 +#define TRACER_CYCLE_ACC_BIT 2 +#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT) +#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT) +#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT) + +struct tracectx { + unsigned int etb_bufsz; + void __iomem *etb_regs; + void __iomem *etm_regs; + unsigned long flags; + int ncmppairs; + int etm_portsz; + struct device *dev; + struct clk *emu_clk; + struct mutex mutex; +}; + +#define TRACER_TIMEOUT 10000 + +#define etm_writel(t, v, x) \ + (__raw_writel((v), (t)->etm_regs + (x))) +#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x))) + +/* CoreSight Management Registers */ +#define CSMR_LOCKACCESS 0xfb0 +#define CSMR_LOCKSTATUS 0xfb4 +#define CSMR_AUTHSTATUS 0xfb8 +#define CSMR_DEVID 0xfc8 +#define CSMR_DEVTYPE 0xfcc +/* CoreSight Component Registers */ +#define CSCR_CLASS 0xff4 + +#define CSCR_PRSR 0x314 + +#define UNLOCK_MAGIC 0xc5acce55 + +/* ETM control register, "ETM Architecture", 3.3.1 */ +#define ETMR_CTRL 0 +#define ETMCTRL_POWERDOWN 1 +#define ETMCTRL_PROGRAM (1 << 10) +#define ETMCTRL_PORTSEL (1 << 11) +#define ETMCTRL_DO_CONTEXTID (3 << 14) +#define ETMCTRL_PORTMASK1 (7 << 4) +#define ETMCTRL_PORTMASK2 (1 << 21) +#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2) +#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21) +#define ETMCTRL_DO_CPRT (1 << 1) +#define ETMCTRL_DATAMASK (3 << 2) +#define ETMCTRL_DATA_DO_DATA (1 << 2) +#define ETMCTRL_DATA_DO_ADDR (1 << 3) +#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR) +#define ETMCTRL_BRANCH_OUTPUT (1 << 8) +#define ETMCTRL_CYCLEACCURATE (1 << 12) + +/* ETM configuration code register */ +#define ETMR_CONFCODE (0x04) + +/* ETM trace start/stop resource control register */ +#define ETMR_TRACESSCTRL (0x18) + +/* ETM trigger event register */ +#define ETMR_TRIGEVT (0x08) + +/* address access type register bits, "ETM architecture", + * table 3-27 */ +/* - access type */ +#define ETMAAT_IFETCH 0 +#define ETMAAT_IEXEC 1 +#define ETMAAT_IEXECPASS 2 +#define ETMAAT_IEXECFAIL 3 +#define ETMAAT_DLOADSTORE 4 +#define ETMAAT_DLOAD 5 +#define ETMAAT_DSTORE 6 +/* - comparison access size */ +#define ETMAAT_JAVA (0 << 3) +#define ETMAAT_THUMB (1 << 3) +#define ETMAAT_ARM (3 << 3) +/* - data value comparison control */ +#define ETMAAT_NOVALCMP (0 << 5) +#define ETMAAT_VALMATCH (1 << 5) +#define ETMAAT_VALNOMATCH (3 << 5) +/* - exact match */ +#define ETMAAT_EXACTMATCH (1 << 7) +/* - context id comparator control */ +#define ETMAAT_IGNCONTEXTID (0 << 8) +#define ETMAAT_VALUE1 (1 << 8) +#define ETMAAT_VALUE2 (2 << 8) +#define ETMAAT_VALUE3 (3 << 8) +/* - security level control */ +#define ETMAAT_IGNSECURITY (0 << 10) +#define ETMAAT_NSONLY (1 << 10) +#define ETMAAT_SONLY (2 << 10) + +#define ETMR_COMP_VAL(x) (0x40 + (x) * 4) +#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4) + +/* ETM status register, "ETM Architecture", 3.3.2 */ +#define ETMR_STATUS (0x10) +#define ETMST_OVERFLOW (1 << 0) +#define ETMST_PROGBIT (1 << 1) +#define ETMST_STARTSTOP (1 << 2) +#define ETMST_TRIGGER (1 << 3) + +#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT) +#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP) +#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER) + +#define ETMR_TRACEENCTRL2 0x1c +#define ETMR_TRACEENCTRL 0x24 +#define ETMTE_INCLEXCL (1 << 24) +#define ETMR_TRACEENEVT 0x20 
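
[Note on the new coresight.h above] The header is mostly register offsets and bit masks; etm_readl()/etm_writel() (and the lock/unlock macros defined a little further down) are what an ETM/ETB driver would build on. A sketch of putting the ETM into its programming state, assuming a populated struct tracectx *t and using only symbols defined so far:

	static int etm_enter_program_mode(struct tracectx *t)
	{
		unsigned long timeout = TRACER_TIMEOUT;
		u32 v = etm_readl(t, ETMR_CTRL);

		etm_writel(t, v | ETMCTRL_PROGRAM, ETMR_CTRL); /* request programming mode */
		while (!etm_progbit(t) && --timeout)           /* poll ETMST_PROGBIT in ETMR_STATUS */
			cpu_relax();

		return timeout ? 0 : -ETIMEDOUT;
	}
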
+#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \ + ETMCTRL_DATA_DO_ADDR | \ + ETMCTRL_BRANCH_OUTPUT | \ + ETMCTRL_DO_CONTEXTID) + +/* ETB registers, "CoreSight Components TRM", 9.3 */ +#define ETBR_DEPTH 0x04 +#define ETBR_STATUS 0x0c +#define ETBR_READMEM 0x10 +#define ETBR_READADDR 0x14 +#define ETBR_WRITEADDR 0x18 +#define ETBR_TRIGGERCOUNT 0x1c +#define ETBR_CTRL 0x20 +#define ETBR_FORMATTERCTRL 0x304 +#define ETBFF_ENFTC 1 +#define ETBFF_ENFCONT (1 << 1) +#define ETBFF_FONFLIN (1 << 4) +#define ETBFF_MANUAL_FLUSH (1 << 6) +#define ETBFF_TRIGIN (1 << 8) +#define ETBFF_TRIGEVT (1 << 9) +#define ETBFF_TRIGFL (1 << 10) + +#define etb_writel(t, v, x) \ + (__raw_writel((v), (t)->etb_regs + (x))) +#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x))) + +#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0) +#define etm_unlock(t) \ + do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) + +#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0) +#define etb_unlock(t) \ + do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) + +#endif /* __ASM_HARDWARE_CORESIGHT_H */ + diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 1a8c7279a28b..9b28f1243bdc 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - if (len) - slot_cnt += *slots_per_op; + slot_cnt += *slots_per_op; return slot_cnt; } @@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - if (len) - slot_cnt += *slots_per_op; + slot_cnt += *slots_per_op; return slot_cnt; } @@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) i += slots_per_op; } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); - if (len) { - iter = iop_hw_desc_slot_idx(hw_desc, i); - iter->byte_count = len; - } + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = len; } } diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 4b8e7f559929..5daea2961d48 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -215,6 +215,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX I/O and Mem space regions for PCI autoconfiguration */ #define IOP3XX_PCI_LOWER_MEM_PA 0x80000000 +#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000 #define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000 #define IOP3XX_PCI_LOWER_IO_PA 0x90000000 @@ -233,7 +234,13 @@ extern int iop3xx_get_init_atu(void); void iop3xx_map_io(void); void iop_init_cp6_handler(void); void iop_init_time(unsigned long tickrate); -unsigned long iop_gettimeoffset(void); + +static inline u32 read_tmr0(void) +{ + u32 val; + asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val)); + return val; +} static inline void write_tmr0(u32 val) { @@ -252,6 +259,11 @@ static inline u32 read_tcr0(void) return val; } +static inline void write_tcr0(u32 val) +{ + asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val)); +} + static inline u32 read_tcr1(void) { u32 val; @@ -259,6 +271,11 @@ static inline u32 read_tcr1(void) return val; } +static inline void write_tcr1(u32 val) +{ + asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val)); +} + static inline void write_trr0(u32 val) { asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val)); diff --git a/arch/arm/include/asm/hardware/it8152.h 
b/arch/arm/include/asm/hardware/it8152.h index 74b5fff7f575..6700c7fc7ebd 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -75,6 +75,18 @@ extern unsigned long it8152_base_address; IT8152_PD_IRQ(1) USB (USBR) IT8152_PD_IRQ(0) Audio controller (ACR) */ +#define IT8152_IRQ(x) (IRQ_BOARD_END + (x)) + +/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ +#define IT8152_LD_IRQ_COUNT 9 +#define IT8152_LP_IRQ_COUNT 16 +#define IT8152_PD_IRQ_COUNT 15 + +/* Priorities: */ +#define IT8152_PD_IRQ(i) IT8152_IRQ(i) +#define IT8152_LP_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT) +#define IT8152_LD_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT) + /* frequently used interrupts */ #define IT8152_PCISERR IT8152_PD_IRQ(14) #define IT8152_H2PTADR IT8152_PD_IRQ(13) diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h index 954b1be991b4..74e51d6bd93f 100644 --- a/arch/arm/include/asm/hardware/locomo.h +++ b/arch/arm/include/asm/hardware/locomo.h @@ -214,4 +214,8 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int /* Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf); +struct locomo_platform_data { + int irq_base; /* IRQ base for cascaded on-chip IRQs */ +}; + #endif diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h index 5da2595759e5..92ed254c175b 100644 --- a/arch/arm/include/asm/hardware/sa1111.h +++ b/arch/arm/include/asm/hardware/sa1111.h @@ -578,4 +578,8 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); +struct sa1111_platform_data { + int irq_base; /* base for cascaded on-chip IRQs */ +}; + #endif /* _ASM_ARCH_SA1111 */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d2a59cfc30ce..c980156f3263 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); /* * __arm_ioremap takes CPU physical address. * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page + * The _caller variety takes a __builtin_return_address(0) value for + * /proc/vmalloc to use - and should only be used in non-inline functions. 
*/ -extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, + size_t, unsigned int, void *); +extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, + void *); + +extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); extern void __iounmap(volatile void __iomem *addr); /* diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index d16ec97ec9a9..c019949a5189 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -22,4 +22,10 @@ enum km_type { KM_TYPE_NR }; +#ifdef CONFIG_DEBUG_HIGHMEM +#define KM_NMI (-1) +#define KM_NMI_PTE (-1) +#define KM_IRQ_PTE (-1) +#endif + #endif diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h new file mode 100644 index 000000000000..948178cc6ba8 --- /dev/null +++ b/arch/arm/include/asm/mach-types.h @@ -0,0 +1 @@ +#include <generated/mach-types.h> diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index acac5302e4ea..8920b2d6e3b8 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *); */ #define do_bad_IRQ(irq,desc) \ do { \ - spin_lock(&desc->lock); \ + raw_spin_lock(&desc->lock); \ handle_bad_irq(irq, desc); \ - spin_unlock(&desc->lock); \ + raw_spin_unlock(&desc->lock); \ } while(0) #endif diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index b2cc1fcd0400..8bffc3ff3acf 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -46,12 +46,4 @@ struct sys_timer { extern struct sys_timer *system_timer; extern void timer_tick(void); -/* - * Kernel time keeping support. - */ -struct timespec; -extern int (*set_rtc)(void); -extern void save_time_delta(struct timespec *delta, struct timespec *rtc); -extern void restore_time_delta(struct timespec *delta, struct timespec *rtc); - #endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index cefedf062138..4312ee5e3d0b 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -76,6 +76,17 @@ */ #define IOREMAP_MAX_ORDER 24 +/* + * Size of DMA-consistent memory region. Must be multiple of 2M, + * between 2MB and 14MB inclusive. + */ +#ifndef CONSISTENT_DMA_SIZE +#define CONSISTENT_DMA_SIZE SZ_2M +#endif + +#define CONSISTENT_END (0xffe00000UL) +#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) + #else /* CONFIG_MMU */ /* @@ -93,11 +104,11 @@ #endif #ifndef PHYS_OFFSET -#define PHYS_OFFSET (CONFIG_DRAM_BASE) +#define PHYS_OFFSET UL(CONFIG_DRAM_BASE) #endif #ifndef END_MEM -#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) +#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif #ifndef PAGE_OFFSET @@ -113,20 +124,14 @@ #endif /* !CONFIG_MMU */ /* - * Size of DMA-consistent memory region. Must be multiple of 2M, - * between 2MB and 14MB inclusive. - */ -#ifndef CONSISTENT_DMA_SIZE -#define CONSISTENT_DMA_SIZE SZ_2M -#endif - -/* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. 
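
[Note on the io.h change above] The new _caller variants record a __builtin_return_address(0) value so /proc/vmallocinfo can attribute the mapping to the real caller instead of the ioremap wrapper; per the comment, they should only be used from non-inline functions. A sketch of a hypothetical out-of-line wrapper; MT_DEVICE (from asm/mach/map.h) is assumed as the memory type here:

	void __iomem *my_chip_ioremap(unsigned long phys, size_t size)
	{
		return __arm_ioremap_caller(phys, size, MT_DEVICE,
					    __builtin_return_address(0));
	}
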
*/ +#ifndef __virt_to_phys #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) +#endif /* * Convert a physical address to a Page Frame Number and back @@ -134,6 +139,12 @@ #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +/* + * Convert a page to/from a physical address + */ +#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) +#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) + #ifndef __ASSEMBLY__ /* @@ -194,7 +205,8 @@ static inline void *phys_to_virt(unsigned long x) #ifndef __virt_to_bus #define __virt_to_bus __virt_to_phys #define __bus_to_virt __phys_to_virt -#define __pfn_to_bus(x) ((x) << PAGE_SHIFT) +#define __pfn_to_bus(x) __pfn_to_phys(x) +#define __bus_to_pfn(x) __phys_to_pfn(x) #endif static inline __deprecated unsigned long virt_to_bus(void *x) @@ -293,11 +305,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #endif /* !CONFIG_DISCONTIGMEM */ /* - * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die. - */ -#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) - -/* * Optional coherency support. Currently used only by selected * Intel XSC3-based systems. */ diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h index 8eebf89f5ab1..41f99c573b93 100644 --- a/arch/arm/include/asm/mman.h +++ b/arch/arm/include/asm/mman.h @@ -1 +1,4 @@ #include <asm-generic/mman.h> + +#define arch_mmap_check(addr, len, flags) \ + (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0) diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index b561584d04a1..68870c776671 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -6,6 +6,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID unsigned int id; + spinlock_t id_lock; #endif unsigned int kvm_seq; } mm_context_t; diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index de6cefb329dd..a0b3cac0547c 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm); #define ASID_FIRST_VERSION (1 << ASID_BITS) extern unsigned int cpu_last_asid; +#ifdef CONFIG_SMP +DECLARE_PER_CPU(struct mm_struct *, current_mm); +#endif void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); static inline void check_context(struct mm_struct *mm) { + /* + * This code is executed with interrupts enabled. Therefore, + * mm->context.id cannot be updated to the latest ASID version + * on a different CPU (and condition below not triggered) + * without first getting an IPI to reset the context. The + * alternative is to take a read_lock on mm->context.id_lock + * (after changing its type to rwlock_t). 
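
[Note on the mman.h change above] arch_mmap_check() now rejects MAP_FIXED requests below FIRST_USER_ADDRESS with -EINVAL. A hedged userspace illustration of the intended effect; note that other policy such as mmap_min_addr may refuse low mappings first with a different errno:

	#include <stdio.h>
	#include <errno.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* A fixed mapping at address 0 is below FIRST_USER_ADDRESS. */
		void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

		if (p == MAP_FAILED)
			printf("mmap refused: errno=%d\n", errno);
		return 0;
	}
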
+ */ if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) __new_context(mm); @@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, __flush_icache_all(); #endif if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { +#ifdef CONFIG_SMP + struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); + *crt_mm = next; +#endif check_context(next); cpu_switch_mm(next->pgd, next); if (cache_is_vivt()) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 3a32af4cce30..a485ac3c8696 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -117,11 +117,12 @@ #endif struct page; +struct vm_area_struct; struct cpu_user_fns { void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); }; #ifdef MULTI_USER @@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user; extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); extern void __cpu_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); #endif #define clear_user_highpage(page,vaddr) \ @@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define copy_user_highpage(to,from,vaddr,vma) \ - __cpu_copy_user_highpage(to, from, vaddr) + __cpu_copy_user_highpage(to, from, vaddr, vma) #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 226cddd2fb65..47980118d0a5 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -30,17 +30,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) */ #define PCI_DMA_BUS_IS_PHYS (1) -/* - * Whether pci_unmap_{single,page} is a nop depends upon the - * configuration. - */ -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h new file mode 100644 index 000000000000..49e3049aba32 --- /dev/null +++ b/arch/arm/include/asm/perf_event.h @@ -0,0 +1,31 @@ +/* + * linux/arch/arm/include/asm/perf_event.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PERF_EVENT_H__ +#define __ARM_PERF_EVENT_H__ + +/* + * NOP: on *most* (read: all supported) ARM platforms, the performance + * counter interrupts are regular interrupts and not an NMI. This + * means that when we receive the interrupt we can call + * perf_event_do_pending() that handles all of the work with + * interrupts enabled. 
+ */ +static inline void +set_perf_event_pending(void) +{ +} + +/* ARM performance counters start from 1 (in the cp15 accesses) so use the + * same indexes here for consistency. */ +#define PERF_EVENT_INDEX_OFFSET 1 + +#endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index b011f2e939aa..ffc0e85775b4 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -67,6 +67,7 @@ static inline int pte_file(pte_t pte) { return 0; } */ #define pgprot_noncached(prot) __pgprot(0) #define pgprot_writecombine(prot) __pgprot(0) +#define pgprot_dmacoherent(prot) __pgprot(0) /* @@ -86,8 +87,8 @@ extern unsigned int kobjsize(const void *objp); * All 32bit addresses are effectively valid for vmalloc... * Sort of meaningless for non-VM targets. */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff +#define VMALLOC_START 0UL +#define VMALLOC_END 0xffffffffUL #define FIRST_USER_ADDRESS (0) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 201ccaa11f61..11397687f42c 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); static inline pte_t pte_mkspecial(pte_t pte) { return pte; } +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + /* * Mark the prot value as uncacheable and unbufferable. */ #define pgprot_noncached(prot) \ - __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED) + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) #define pgprot_writecombine(prot) \ - __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE) + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) +#if __LINUX_ARM_ARCH__ >= 7 +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) +#else +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) +#endif #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd)) diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h new file mode 100644 index 000000000000..2829b9f981a1 --- /dev/null +++ b/arch/arm/include/asm/pmu.h @@ -0,0 +1,75 @@ +/* + * linux/arch/arm/include/asm/pmu.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PMU_H__ +#define __ARM_PMU_H__ + +#ifdef CONFIG_CPU_HAS_PMU + +struct pmu_irqs { + const int *irqs; + int num_irqs; +}; + +/** + * reserve_pmu() - reserve the hardware performance counters + * + * Reserve the hardware performance counters in the system for exclusive use. + * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR() + * encoded error on failure. + */ +extern const struct pmu_irqs * +reserve_pmu(void); + +/** + * release_pmu() - Relinquish control of the performance counters + * + * Release the performance counters and allow someone else to use them. + * Callers must have disabled the counters and released IRQs before calling + * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as + * a cookie. + */ +extern int +release_pmu(const struct pmu_irqs *irqs); + +/** + * init_pmu() - Initialise the PMU. + * + * Initialise the system ready for PMU enabling. 
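
[Note on the new pmu.h above] The header defines the reservation protocol for the hardware performance counters shared by users such as perf events and OProfile: reserve_pmu() for an exclusive claim, init_pmu() for system setup, release_pmu() to hand the counters back. A sketch of that sequence; my_pmu_handler and the partial-failure cleanup are placeholders:

	static int claim_pmu(void)
	{
		const struct pmu_irqs *pmu_irqs;
		int i, err = 0;

		pmu_irqs = reserve_pmu();          /* exclusive claim; ERR_PTR() on failure */
		if (IS_ERR(pmu_irqs))
			return PTR_ERR(pmu_irqs);

		init_pmu();                        /* e.g. set IRQ affinity */

		for (i = 0; i < pmu_irqs->num_irqs && !err; i++)
			err = request_irq(pmu_irqs->irqs[i], my_pmu_handler,
					  IRQF_DISABLED, "arm-pmu", NULL);

		if (err)                           /* free any IRQs already requested, then: */
			release_pmu(pmu_irqs);
		return err;
	}
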
This should typically set the + * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do + * the actual hardware initialisation. + */ +extern int +init_pmu(void); + +#else /* CONFIG_CPU_HAS_PMU */ + +static inline const struct pmu_irqs * +reserve_pmu(void) +{ + return ERR_PTR(-ENODEV); +} + +static inline int +release_pmu(const struct pmu_irqs *irqs) +{ + return -ENODEV; +} + +static inline int +init_pmu(void) +{ + return -ENODEV; +} + +#endif /* CONFIG_CPU_HAS_PMU */ + +#endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 3976412685f8..8fdae9bc9abb 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -24,206 +24,228 @@ * CPU_NAME - the prefix for CPU related functions */ -#ifdef CONFIG_CPU_32 -# ifdef CONFIG_CPU_ARM610 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm6 -# endif +#ifdef CONFIG_CPU_ARM610 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm6 # endif -# ifdef CONFIG_CPU_ARM7TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7tdmi -# endif +#endif + +#ifdef CONFIG_CPU_ARM7TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7tdmi # endif -# ifdef CONFIG_CPU_ARM710 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7 -# endif +#endif + +#ifdef CONFIG_CPU_ARM710 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7 # endif -# ifdef CONFIG_CPU_ARM720T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm720 -# endif +#endif + +#ifdef CONFIG_CPU_ARM720T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm720 # endif -# ifdef CONFIG_CPU_ARM740T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm740 -# endif +#endif + +#ifdef CONFIG_CPU_ARM740T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm740 # endif -# ifdef CONFIG_CPU_ARM9TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm9tdmi -# endif +#endif + +#ifdef CONFIG_CPU_ARM9TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm9tdmi # endif -# ifdef CONFIG_CPU_ARM920T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm920 -# endif +#endif + +#ifdef CONFIG_CPU_ARM920T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm920 # endif -# ifdef CONFIG_CPU_ARM922T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm922 -# endif +#endif + +#ifdef CONFIG_CPU_ARM922T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm922 # endif -# ifdef CONFIG_CPU_FA526 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_fa526 -# endif +#endif + +#ifdef CONFIG_CPU_FA526 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_fa526 # endif -# ifdef CONFIG_CPU_ARM925T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm925 -# endif +#endif + +#ifdef CONFIG_CPU_ARM925T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm925 # endif -# ifdef CONFIG_CPU_ARM926T -# 
ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm926 -# endif +#endif + +#ifdef CONFIG_CPU_ARM926T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm926 # endif -# ifdef CONFIG_CPU_ARM940T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm940 -# endif +#endif + +#ifdef CONFIG_CPU_ARM940T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm940 # endif -# ifdef CONFIG_CPU_ARM946E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm946 -# endif +#endif + +#ifdef CONFIG_CPU_ARM946E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm946 # endif -# ifdef CONFIG_CPU_SA110 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa110 -# endif +#endif + +#ifdef CONFIG_CPU_SA110 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa110 # endif -# ifdef CONFIG_CPU_SA1100 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa1100 -# endif +#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa1100 # endif -# ifdef CONFIG_CPU_ARM1020 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020 -# endif +#endif + +#ifdef CONFIG_CPU_ARM1020 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020 # endif -# ifdef CONFIG_CPU_ARM1020E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020e -# endif +#endif + +#ifdef CONFIG_CPU_ARM1020E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020e # endif -# ifdef CONFIG_CPU_ARM1022 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1022 -# endif +#endif + +#ifdef CONFIG_CPU_ARM1022 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1022 # endif -# ifdef CONFIG_CPU_ARM1026 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1026 -# endif +#endif + +#ifdef CONFIG_CPU_ARM1026 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1026 # endif -# ifdef CONFIG_CPU_XSCALE -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xscale -# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xscale # endif -# ifdef CONFIG_CPU_XSC3 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xsc3 -# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xsc3 # endif -# ifdef CONFIG_CPU_MOHAWK -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_mohawk -# endif +#endif + +#ifdef CONFIG_CPU_MOHAWK +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_mohawk # endif -# ifdef CONFIG_CPU_FEROCEON -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_feroceon -# endif +#endif + +#ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_feroceon # endif -# ifdef CONFIG_CPU_V6 -# ifdef CPU_NAME -# undef 
MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v6 -# endif +#endif + +#ifdef CONFIG_CPU_V6 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v6 # endif -# ifdef CONFIG_CPU_V7 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v7 -# endif +#endif + +#ifdef CONFIG_CPU_V7 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v7 # endif #endif diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index bbecccda76d0..9dcb11e59026 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -97,9 +97,15 @@ * stack during a system call. Note that sizeof(struct pt_regs) * has to be a multiple of 8. */ +#ifndef __KERNEL__ struct pt_regs { long uregs[18]; }; +#else /* __KERNEL__ */ +struct pt_regs { + unsigned long uregs[18]; +}; +#endif /* __KERNEL__ */ #define ARM_cpsr uregs[16] #define ARM_pc uregs[15] @@ -122,6 +128,8 @@ struct pt_regs { #ifdef __KERNEL__ +#define arch_has_single_step() (1) + #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 5ccce0a9b03c..f392fb4437af 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -223,18 +223,6 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -/* - * Early command line parameters. - */ -struct early_params { - const char *arg; - void (*fn)(char **p); -}; - -#define __early_param(name,fn) \ -static struct early_params __early_##fn __used \ -__attribute__((__section__(".early_param.init"))) = { name, fn } - #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h new file mode 100644 index 000000000000..e6215305544a --- /dev/null +++ b/arch/arm/include/asm/smp_plat.h @@ -0,0 +1,21 @@ +/* + * ARM specific SMP header, this contains our implementation + * details. + */ +#ifndef __ASMARM_SMP_PLAT_H +#define __ASMARM_SMP_PLAT_H + +#include <asm/cputype.h> + +/* all SMP configurations have the extended CPUID registers */ +static inline int tlb_ops_need_broadcast(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; +} + +static inline int cache_ops_need_broadcast(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; +} + +#endif diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h index 92ac61d294fd..90ffd04b8e74 100644 --- a/arch/arm/include/asm/socket.h +++ b/arch/arm/include/asm/socket.h @@ -60,4 +60,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c13681ac1ede..17eb355707dd 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,6 +5,22 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +static inline void dsb_sev(void) +{ +#if __LINUX_ARM_ARCH__ >= 7 + __asm__ __volatile__ ( + "dsb\n" + "sev" + ); +#elif defined(CONFIG_CPU_32v6K) + __asm__ __volatile__ ( + "mcr p15, 0, %0, c7, c10, 4\n" + "sev" + : : "r" (0) + ); +#endif +} + /* * ARMv6 Spin-locking. 
* @@ -17,13 +33,13 @@ * Locked value: 1 */ -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +59,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,19 +79,17 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); __asm__ __volatile__( " str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev" -#endif : : "r" (&lock->lock), "r" (0) : "cc"); + + dsb_sev(); } /* @@ -86,7 +100,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +120,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,23 +140,21 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) } } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); __asm__ __volatile__( "str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev\n" -#endif : : "r" (&rw->lock), "r" (0) : "cc"); + + dsb_sev(); } /* write_can_lock - would write_trylock() succeed? */ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) /* * Read locks are a bit more hairy: @@ -156,7 +168,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. */ -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +188,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -188,17 +200,15 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) " strex %1, %0, [%2]\n" " teq %1, #0\n" " bne 1b" -#ifdef CONFIG_CPU_32v6K -"\n cmp %0, #0\n" -" mcreq p15, 0, %0, c7, c10, 4\n" -" seveq" -#endif : "=&r" (tmp), "=&r" (tmp2) : "r" (&rw->lock) : "cc"); + + if (tmp == 0) + dsb_sev(); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; @@ -215,13 +225,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) } /* read_can_lock - would read_trylock() succeed? 
*/ -#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) +#define arch_read_can_lock(x) ((x)->lock < 0x80000000) -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 43e83f6d2ee5..d14d197ae04a 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h index ca2bf2f6d6ea..9997ad20eff1 100644 --- a/arch/arm/include/asm/swab.h +++ b/arch/arm/include/asm/swab.h @@ -22,6 +22,24 @@ # define __SWAB_64_THRU_32__ #endif +#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6 + +static inline __attribute_const__ __u16 __arch_swab16(__u16 x) +{ + __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x)); + return x; +} +#define __arch_swab16 __arch_swab16 + +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) +{ + __asm__ ("rev %0, %1" : "=r" (x) : "r" (x)); + return x; +} +#define __arch_swab32 __arch_swab32 + +#else + static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __u32 t; @@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) #endif +#endif diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index d65b2f5bf41f..ca88e6a84707 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285; struct pt_regs; -void die(const char *msg, struct pt_regs *regs, int err) - __attribute__((noreturn)); +void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, @@ -138,21 +137,26 @@ extern unsigned int user_debug; #define dmb() __asm__ __volatile__ ("" : : : "memory") #endif -#ifndef CONFIG_SMP +#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP) +#define mb() dmb() +#define rmb() dmb() +#define wmb() dmb() +#else #define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) #define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) #define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#endif + +#ifndef CONFIG_SMP #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #else -#define mb() dmb() -#define rmb() dmb() -#define wmb() dmb() -#define smp_mb() dmb() -#define smp_rmb() dmb() -#define smp_wmb() dmb() +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() #endif + #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h 
index 2dfb7d7a66e9..b74970ec02c4 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *); extern void iwmmxt_task_release(struct thread_info *); extern void iwmmxt_task_switch(struct thread_info *); -extern void vfp_sync_state(struct thread_info *thread); +extern void vfp_sync_hwstate(struct thread_info *); +extern void vfp_flush_hwstate(struct thread_info *); #endif diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h index f27379d7f72a..c4391ba20350 100644 --- a/arch/arm/include/asm/thread_notify.h +++ b/arch/arm/include/asm/thread_notify.h @@ -41,7 +41,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread) * These are the reason codes for the thread notifier. */ #define THREAD_NOTIFY_FLUSH 0 -#define THREAD_NOTIFY_RELEASE 1 +#define THREAD_NOTIFY_EXIT 1 #define THREAD_NOTIFY_SWITCH 2 #endif diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index a45ab5dd8255..e085e2c545eb 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { + if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { if (tlb_flag(TLB_V3_FULL)) asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); if (tlb_flag(TLB_V4_U_FULL)) @@ -360,6 +360,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_V4_I_FULL)) asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); } + put_cpu(); if (tlb_flag(TLB_V6_U_ASID)) asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc"); @@ -528,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); * cache entries for the kernels virtual memory range are written * back to the page. */ -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); #endif diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 89f7eade20af..dd2bf53000fe 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -391,6 +391,7 @@ #define __NR_pwritev (__NR_SYSCALL_BASE+362) #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) +#define __NR_recvmmsg (__NR_SYSCALL_BASE+365) /* * The following SWIs are ARM private. @@ -403,6 +404,15 @@ #define __ARM_NR_set_tls (__ARM_NR_BASE+5) /* + * *NOTE*: This is a ghost syscall private to the kernel. Only the + * __kuser_cmpxchg code in entry-armv.S should be aware of its + * existence. Don't ever use this from user code. + */ +#ifdef __KERNEL__ +#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) +#endif + +/* * The following syscalls are obsolete and no longer available for EABI. 
*/ #if defined(__ARM_EABI__) && !defined(__KERNEL__) @@ -433,9 +443,12 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND +#define __ARCH_WANT_SYS_OLD_MMAP +#define __ARCH_WANT_SYS_OLD_SELECT #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) #define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_UTIME @@ -456,6 +469,7 @@ * Unimplemented (or alternatively implemented) syscalls */ #define __IGNORE_fadvise64_64 1 +#define __IGNORE_migrate_pages 1 #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */
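Illustrative note, not part of the patch: with CONFIG_CPU_HAS_PMU unset, reserve_pmu(), release_pmu() and init_pmu() become -ENODEV stubs, so a user such as perf events or oprofile can probe unconditionally. A hedged sketch of that calling pattern follows; the demo_* names are assumptions, not taken from the header.

#include <linux/err.h>
#include <asm/pmu.h>

static const struct pmu_irqs *demo_irqs;

static int demo_pmu_probe(void)
{
        demo_irqs = reserve_pmu();
        if (IS_ERR(demo_irqs))
                return PTR_ERR(demo_irqs);      /* -ENODEV when no PMU support is built in */

        return init_pmu();                      /* e.g. set IRQ affinity, per the comment above */
}

static void demo_pmu_release(void)
{
        release_pmu(demo_irqs);
}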
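Illustrative note, not part of the patch: the proc-fns.h rewrite drops the obsolete CONFIG_CPU_32 nesting but keeps the same selection scheme: each enabled CONFIG_CPU_* either sets CPU_NAME to its prefix or, if CPU_NAME is already defined, defines MULTI_CPU so calls go through a function-pointer table instead. A hedged sketch of how such a prefix is typically consumed by token pasting; the macro names here are illustrative, not taken from this header.

#ifndef MULTI_CPU
#define __demo_glue(name, suffix)       name##suffix
#define _demo_glue(name, suffix)        __demo_glue(name, suffix)

/* With only CONFIG_CPU_V7 enabled, CPU_NAME is cpu_v7, so a call to
 * demo_cpu_proc_init() expands to cpu_v7_proc_init() at compile time. */
#define demo_cpu_proc_init      _demo_glue(CPU_NAME, _proc_init)
#endif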
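Illustrative note, not part of the patch: the new asm/smp_plat.h helpers read the ID_MMFR3 maintenance-broadcast field (bits [15:12]) to decide whether TLB or cache operations must be broadcast to other cores in software. A hedged sketch of how a caller might branch on them; demo_ipi_flush is a hypothetical IPI handler, not anything defined by this patch.

#include <linux/smp.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

static void demo_ipi_flush(void *ignored)
{
        local_flush_tlb_all();
}

static void demo_flush_all_tlbs(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(demo_ipi_flush, NULL, 1);   /* software broadcast via IPI */
        else
                local_flush_tlb_all();                  /* hardware broadcasts it for us */
}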
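Illustrative note, not part of the patch: the spinlock.h and spinlock_types.h hunks above rename the __raw_*/raw_* lock primitives and types to arch_*. A minimal sketch of code driving the renamed API directly, assuming the hypothetical names demo_lock, demo_count and demo_increment:

#include <linux/spinlock.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

static void demo_increment(void)
{
        arch_spin_lock(&demo_lock);     /* ldrex/strex acquire loop */
        demo_count++;
        arch_spin_unlock(&demo_lock);   /* stores 0, then dsb_sev() wakes waiters */
}

Ordinary kernel code would keep using spin_lock()/spin_unlock(); the arch_* layer is what those wrappers bottom out in.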
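Illustrative note, not part of the patch: on __LINUX_ARM_ARCH__ >= 6 the swab.h hunk implements byte swapping with the rev and rev16 instructions. A small hedged example of the results expected through the generic helpers:

#include <linux/types.h>
#include <linux/swab.h>

static void demo_swab(void)
{
        u32 w = __swab32(0x12345678);   /* 0x78563412, a single "rev" on ARMv6+ */
        u16 h = __swab16(0xabcd);       /* 0xcdab, a single "rev16" on ARMv6+ */

        (void)w;
        (void)h;
}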
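Illustrative note, not part of the patch: after the system.h change above, mb()/rmb()/wmb() expand to dmb() on ARMv7 or SMP kernels, and the smp_* barriers are defined in terms of them. A minimal sketch of the producer/consumer pairing these barriers exist for, using the hypothetical variables data and ready:

#include <asm/system.h>

static int data;
static int ready;

static void demo_publish(int v)
{
        data = v;
        smp_wmb();      /* make the data store visible before the flag store */
        ready = 1;
}

static int demo_consume(int *out)
{
        if (!ready)
                return 0;
        smp_rmb();      /* pairs with smp_wmb(): flag load before data load */
        *out = data;
        return 1;
}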
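Illustrative note, not part of the patch: the tlbflush.h hunk switches local_flush_tlb_mm() from smp_processor_id() to get_cpu()/put_cpu() so the cpumask test cannot race with preemption moving the task to another CPU. The general pattern, as a hedged sketch with a hypothetical helper name:

#include <linux/smp.h>
#include <linux/sched.h>

static int demo_mm_is_local(struct mm_struct *mm)
{
        int cpu = get_cpu();    /* disables preemption, returns the current CPU */
        int local = cpumask_test_cpu(cpu, mm_cpumask(mm));

        put_cpu();              /* re-enable preemption */
        return local;
}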