Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--  arch/xtensa/include/asm/asmmacro.h         40
-rw-r--r--  arch/xtensa/include/asm/current.h           4
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h      10
-rw-r--r--  arch/xtensa/include/asm/fixmap.h            4
-rw-r--r--  arch/xtensa/include/asm/futex.h            23
-rw-r--r--  arch/xtensa/include/asm/highmem.h           2
-rw-r--r--  arch/xtensa/include/asm/kasan.h            37
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h       7
-rw-r--r--  arch/xtensa/include/asm/linkage.h           9
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h       1
-rw-r--r--  arch/xtensa/include/asm/nommu_context.h     4
-rw-r--r--  arch/xtensa/include/asm/page.h              2
-rw-r--r--  arch/xtensa/include/asm/pgtable.h           3
-rw-r--r--  arch/xtensa/include/asm/ptrace.h           15
-rw-r--r--  arch/xtensa/include/asm/regs.h              1
-rw-r--r--  arch/xtensa/include/asm/stackprotector.h   40
-rw-r--r--  arch/xtensa/include/asm/string.h           23
-rw-r--r--  arch/xtensa/include/asm/thread_info.h      16
-rw-r--r--  arch/xtensa/include/asm/traps.h            35
-rw-r--r--  arch/xtensa/include/asm/uaccess.h           9
-rw-r--r--  arch/xtensa/include/uapi/asm/poll.h        21
21 files changed, 247 insertions, 59 deletions
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 746dcc8b5abc..7f2ae5872151 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -150,5 +150,45 @@
__endl \ar \as
.endm
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(handler) \
+ .section __ex_table, "a"; \
+ .word 97f, handler; \
+ .previous \
+97:
+
+
+/*
+ * Extract unaligned word that is split between two registers w0 and w1
+ * into r regardless of machine endianness. SAR must be loaded with the
+ * starting bit of the word (see __ssa8).
+ */
+
+ .macro __src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+/*
+ * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned
+ * word starting at r from two registers loaded from consecutive aligned
+ * addresses covering r regardless of machine endianness.
+ *
+ * r 0 1 2 3
+ * LE SAR 0 8 16 24
+ * BE SAR 32 24 16 8
+ */
+
+ .macro __ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
#endif /* _XTENSA_ASMMACRO_H */
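A rough C model of what the __ssa8/__src_b pair computes on a little-endian core (an illustration derived from the comments above; the helper name and register-free form are not part of the patch):

#include <stdint.h>

/* Assemble the 32-bit word at an unaligned address from the two aligned
 * words that straddle it.  __ssa8 corresponds to shift = 8 * (p & 3),
 * __src_b to the funnel shift of the two loaded words. */
static uint32_t load_unaligned_le(const uint8_t *p)
{
	const uint32_t *a = (const uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
	unsigned int shift = ((uintptr_t)p & 3) * 8;
	uint32_t w0 = a[0];	/* aligned word at the lower address */
	uint32_t w1 = a[1];	/* following aligned word */

	return shift ? (w0 >> shift) | (w1 << (32 - shift)) : w0;
}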
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 47e46dcf5d49..5d98a7ad4251 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -11,6 +11,8 @@
#ifndef _XTENSA_CURRENT_H
#define _XTENSA_CURRENT_H
+#include <asm/thread_info.h>
+
#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
@@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void)
#else
-#define CURRENT_SHIFT 13
-
#define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \
l32i reg, reg, TI_TASK \
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 153bf2370988..44098800dad7 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -23,14 +23,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &xtensa_dma_map_ops;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return (dma_addr_t)paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return (phys_addr_t)daddr;
-}
-
#endif /* _XTENSA_DMA_MAPPING_H */
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 0d30403b6c95..7e25c1b50ac0 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -44,7 +44,7 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
@@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx)
* table.
*/
BUILD_BUG_ON(FIXADDR_START <
- XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index eaaf1ebcc7a4..5bfbc1c401d4 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
- u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__asm__ __volatile__ (
" # futex_atomic_cmpxchg_inatomic\n"
- "1: l32i %1, %3, 0\n"
- " mov %0, %5\n"
- " wsr %1, scompare1\n"
- "2: s32c1i %0, %3, 0\n"
- "3:\n"
+ " wsr %5, scompare1\n"
+ "1: s32c1i %1, %4, 0\n"
+ " s32i %1, %6, 0\n"
+ "2:\n"
" .section .fixup,\"ax\"\n"
" .align 4\n"
- "4: .long 3b\n"
- "5: l32r %1, 4b\n"
- " movi %0, %6\n"
+ "3: .long 2b\n"
+ "4: l32r %1, 3b\n"
+ " movi %0, %7\n"
" jx %1\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .long 1b,5b,2b,5b\n"
+ " .long 1b,4b\n"
" .previous\n"
- : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
- : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
+ : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
+ : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
: "memory");
- *uval = prev;
return ret;
}
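The rewritten sequence leans on the S32C1I instruction paired with the SCOMPARE1 special register. A rough C model of its semantics (illustration only; the hardware performs the compare and conditional store as a single atomic operation):

#include <stdint.h>

static uint32_t s32c1i_model(uint32_t *addr, uint32_t scompare1, uint32_t newval)
{
	uint32_t old = *addr;		/* value currently at the address */

	if (old == scompare1)		/* store happens only on a match */
		*addr = newval;
	return old;			/* the register operand receives the old value */
}

So after the asm above, %1 holds whatever was in *uaddr, which the following s32i then writes to *uval.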
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 6e070db1022e..04e9340eac4b 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -72,7 +72,7 @@ static inline void *kmap(struct page *page)
* page table.
*/
BUILD_BUG_ON(PKMAP_BASE <
- XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000000..54be80876e57
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
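These constants feed the shadow-address transformation; a minimal sketch of that mapping, mirroring the generic kasan_mem_to_shadow() helper (one shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory):

static inline void *mem_to_shadow(const void *addr)
{
	/* shadow = (addr >> scale) + offset */
	return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET);
}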
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 561f8729bcde..2317c835a4db 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -71,4 +71,11 @@
#endif
+#ifndef CONFIG_KASAN
+#define KERNEL_STACK_SHIFT 13
+#else
+#define KERNEL_STACK_SHIFT 15
+#endif
+#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
+
#endif
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
index 000000000000..0ba9973235d9
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index f7e186dfc4e4..de5e6cbbafe4 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache);
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
void init_mmu(void);
+void init_kio(void);
static inline void set_rasid_register (unsigned long val)
{
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 2cebdbbdb633..37251b2ef871 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -3,6 +3,10 @@ static inline void init_mmu(void)
{
}
+static inline void init_kio(void)
+{
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 4ddbfd57a7c8..5d69c11c01b8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -36,8 +36,6 @@
#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif
-#define PGTABLE_START 0x80000000
-
/*
* Cache aliasing:
*
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 30dd5b2e4ad5..38802259978f 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -12,9 +12,9 @@
#define _XTENSA_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
/*
* We only use two ring levels, user and kernel space.
@@ -170,6 +170,7 @@
#define PAGE_SHARED_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index e2d9c5eb10bd..3a5c5918aea3 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -10,6 +10,7 @@
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
+#include <asm/kmem_layout.h>
#include <uapi/asm/ptrace.h>
/*
@@ -38,20 +39,6 @@
* +-----------------------+ --------
*/
-#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
-
-/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
-
-#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
-#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
-#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
-#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
-#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
-#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
-#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
-#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
-#define EXC_TABLE_SIZE 0x400
-
#ifndef __ASSEMBLY__
#include <asm/coprocessor.h>
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 881a1134a4b4..477594e5817f 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -76,6 +76,7 @@
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
+#define EXCCAUSE_N 64
/* PS register fields. */
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h
new file mode 100644
index 000000000000..e368f94fd2af
--- /dev/null
+++ b/arch/xtensa/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adopted from the ARM implementation)
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 8d5d9dfadb09..89b51a0c752f 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n)
"bne %1, %5, 1b\n"
"2:"
: "=r" (__dest), "=r" (__src), "=&r" (__dummy)
- : "0" (__dest), "1" (__src), "r" (__src+__n)
+ : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
: "memory");
return __xdest;
@@ -101,21 +101,40 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
"2:\n\t"
"sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
- : "0" (__cs), "1" (__ct), "r" (__cs+__n));
+ : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
return __res;
}
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
/* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
#endif /* _XTENSA_STRING_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be2400f745a..2bd19ae61e47 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -11,7 +11,9 @@
#ifndef _XTENSA_THREAD_INFO_H
#define _XTENSA_THREAD_INFO_H
-#ifdef __KERNEL__
+#include <asm/kmem_layout.h>
+
+#define CURRENT_SHIFT KERNEL_STACK_SHIFT
#ifndef __ASSEMBLY__
# include <asm/processor.h>
@@ -77,14 +79,11 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- __asm__("extui %0,a1,0,13\n\t"
+ __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
"xor %0, a1, %0" : "=&r" (ti) : );
return ti;
}
@@ -93,7 +92,7 @@ static inline struct thread_info *current_thread_info(void)
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg,sp) \
- extui reg, sp, 0, 13; \
+ extui reg, sp, 0, CURRENT_SHIFT; \
xor reg, sp, reg
#endif
@@ -130,8 +129,7 @@ static inline struct thread_info *current_thread_info(void)
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
-#define THREAD_SIZE 8192 //(2*PAGE_SIZE)
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE KERNEL_STACK_SIZE
+#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
-#endif /* __KERNEL__ */
#endif /* _XTENSA_THREAD_INFO */
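The extui/xor pair above simply clears the low CURRENT_SHIFT bits of the stack pointer, landing on the thread_info at the base of the kernel stack. A C sketch of the same computation (the helper name is illustrative, not from the patch):

static inline struct thread_info *ti_from_sp(unsigned long sp)
{
	/* round sp down to the start of the KERNEL_STACK_SIZE-aligned stack */
	return (struct thread_info *)(sp & ~(unsigned long)(KERNEL_STACK_SIZE - 1));
}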
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 2e69aa4b843f..f5cd7a7e65e0 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -13,12 +13,47 @@
#include <asm/ptrace.h>
/*
+ * Per-CPU exception handling data structure.
+ * EXCSAVE1 points to it.
+ */
+struct exc_table {
+ /* Kernel Stack */
+ void *kstk;
+ /* Double exception save area for a0 */
+ unsigned long double_save;
+ /* Fixup handler */
+ void *fixup;
+ /* For passing a parameter to fixup */
+ void *fixup_param;
+ /* For fast syscall handler */
+ unsigned long syscall_save;
+ /* Fast user exception handlers */
+ void *fast_user_handler[EXCCAUSE_N];
+ /* Fast kernel exception handlers */
+ void *fast_kernel_handler[EXCCAUSE_N];
+ /* Default C-Handlers */
+ void *default_handler[EXCCAUSE_N];
+};
+
+/*
* handler must be either of the following:
* void (*)(struct pt_regs *regs);
* void (*)(struct pt_regs *regs, unsigned long exccause);
*/
extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void fast_second_level_miss(void);
+
+/* Initialize minimal exc_table structure sufficient for basic paging */
+static inline void __init early_trap_init(void)
+{
+ static struct exc_table exc_table __initdata = {
+ .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
+ fast_second_level_miss,
+ };
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+}
+
void secondary_trap_init(void);
static inline void spill_registers(void)
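Each handler table in struct exc_table is indexed directly by EXCCAUSE, and EXCSAVE1 holds a pointer to the per-CPU structure rather than a flat table of hard-coded offsets. A hypothetical lookup helper, just to illustrate the layout (not part of the patch):

static inline void *lookup_fast_user_handler(struct exc_table *excsave1,
					     unsigned int cause)
{
	/* assembly reaches the same slot via
	 * offsetof(struct exc_table, fast_user_handler) + cause * sizeof(void *) */
	return cause < EXCCAUSE_N ? excsave1->fast_user_handler[cause] : NULL;
}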
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index b8f152b6aaa5..f1158b4c629c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -44,6 +44,8 @@
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
+
/*
* These are the main single-value transfer routines. They
* automatically use the right size if we just have the right pointer
@@ -261,7 +263,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
- if ( ! memset(addr, 0, size) )
+ if (!__memset(addr, 0, size))
return size;
return 0;
}
@@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size)
#define __clear_user __xtensa_clear_user
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+
extern long __strncpy_user(char *, const char *, long);
static inline long
@@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count)
return __strncpy_user(dst, src, count);
return -EFAULT;
}
+#else
+long strncpy_from_user(char *dst, const char *src, long count);
+#endif
/*
* Return the size of a string (including the ending 0!)
diff --git a/arch/xtensa/include/uapi/asm/poll.h b/arch/xtensa/include/uapi/asm/poll.h
index 4d249040b33d..e3246d41182c 100644
--- a/arch/xtensa/include/uapi/asm/poll.h
+++ b/arch/xtensa/include/uapi/asm/poll.h
@@ -12,9 +12,26 @@
#ifndef _XTENSA_POLL_H
#define _XTENSA_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 0x0100
-#define POLLREMOVE 0x0800
+#define POLLWRBAND (__force __poll_t)0x0100
+#define POLLREMOVE (__force __poll_t)0x0800
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
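The mangling exists because the xtensa ABI historically used 0x0100 for POLLWRBAND and POLLOUT (0x0004) for POLLWRNORM, while the kernel-internal __poll_t values follow the generic layout (POLLWRNORM 0x0100, POLLWRBAND 0x0200). A standalone user-space model of the two helpers above, just to check the bit shuffling (not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint16_t mangle(uint16_t v)
{
	return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
}

static uint16_t demangle(uint16_t v)
{
	return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6);
}

int main(void)
{
	assert(mangle(0x200) == 0x100);		/* generic POLLWRBAND -> ABI bit 8 */
	assert(mangle(0x100) == 0x004);		/* generic POLLWRNORM -> ABI POLLOUT */
	assert(demangle(0x100) == 0x200);	/* and back again */
	return 0;
}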