Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/checksum_32.h |  2 +-
-rw-r--r--  arch/x86/include/asm/io.h          |  8 ++++++++
-rw-r--r--  arch/x86/include/asm/pgtable_32.h  |  2 +-
-rw-r--r--  arch/x86/include/asm/string_64.h   | 18 ------------------
-rw-r--r--  arch/x86/include/asm/uaccess.h     | 97 +++++++++++++++++++++-----------------------
5 files changed, 59 insertions(+), 68 deletions(-)
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 7a659c74cd03..f57b94e02c57 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -182,7 +182,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
__wsum ret;
might_sleep();
- if (access_ok(VERIFY_WRITE, dst, len)) {
+ if (access_ok(dst, len)) {
stac();
ret = csum_partial_copy_generic(src, (__force void *)dst,
len, sum, NULL, err_ptr);
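
The conversion above is mechanical: access_ok() loses its first argument because VERIFY_READ/VERIFY_WRITE no longer influenced the check on any architecture. A minimal sketch of the resulting call pattern; copy_flag_to_user() is a made-up caller, not part of the patch:

static int copy_flag_to_user(int __user *dst, int flag)
{
	/* Range check only; the access itself can still fault. */
	if (!access_ok(dst, sizeof(*dst)))
		return -EFAULT;
	/* Range already checked, so the unchecked variant suffices. */
	return __put_user(flag, dst);
}
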
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 832da8229cc7..686247db3106 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -221,6 +221,14 @@ extern void set_iounmap_nonlazy(void);
#ifdef __KERNEL__
+void memcpy_fromio(void *, const volatile void __iomem *, size_t);
+void memcpy_toio(volatile void __iomem *, const void *, size_t);
+void memset_io(volatile void __iomem *, int, size_t);
+
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+#define memset_io memset_io
+
#include <asm-generic/iomap.h>
/*
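
The three #defines mapping each function to its own name are how an arch header tells asm-generic/io.h not to emit its fallback definitions, since that header wraps each default in an #ifndef guard. A sketch of the idiom under that assumption, with my_mmio_clear() as a hypothetical op:

/* arch header: declare the real implementation ... */
void my_mmio_clear(volatile void __iomem *dst, size_t n);
#define my_mmio_clear my_mmio_clear	/* ... and mark it as present */

/* generic header: fallback compiles only when the arch has none */
#ifndef my_mmio_clear
static inline void my_mmio_clear(volatile void __iomem *dst, size_t n)
{
	memset_io(dst, 0, n);
}
#endif
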
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index b3ec519e3982..4fe9e7fc74d3 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -37,7 +37,7 @@ void sync_initial_page_table(void);
/*
* Define this if things work differently on an i386 and an i486:
* it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok(VERIFY_WRITE,..)'
+ * done without a 'access_ok( ..)'
*/
#undef TEST_ACCESS_OK
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 7ad41bfcc16c..4e4194e21a09 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -7,24 +7,6 @@
/* Written 2002 by Andi Kleen */
-/* Only used for special circumstances. Stolen from i386/string.h */
-static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
-{
- unsigned long d0, d1, d2;
- asm volatile("rep ; movsl\n\t"
- "testb $2,%b4\n\t"
- "je 1f\n\t"
- "movsw\n"
- "1:\ttestb $1,%b4\n\t"
- "je 2f\n\t"
- "movsb\n"
- "2:"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
- : "memory");
- return to;
-}
-
/* Even with __builtin_ the compiler may decide to use the out of line
function. */
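
For reference, the deleted asm copied n/4 dwords with rep movsl, then used bits 1 and 0 of n to move the 2-byte and 1-byte tails. A plain-C rendering of that logic, purely illustrative (callers rely on memcpy() now):

static inline void *inline_memcpy_sketch(void *to, const void *from, size_t n)
{
	char *d = to;
	const char *s = from;
	size_t i;

	for (i = 0; i < n / 4; i++) {	/* rep ; movsl */
		*(unsigned int *)d = *(const unsigned int *)s;
		d += 4;
		s += 4;
	}
	if (n & 2) {			/* testb $2 ; movsw */
		*(unsigned short *)d = *(const unsigned short *)s;
		d += 2;
		s += 2;
	}
	if (n & 1)			/* testb $1 ; movsb */
		*d = *s;
	return to;
}
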
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index b5e58cc0c5e7..a77445d1b034 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -77,9 +77,6 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
/**
* access_ok: - Checks if a user space pointer is valid
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- * to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
@@ -95,7 +92,7 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) \
+#define access_ok(addr, size) \
({ \
WARN_ON_IN_IRQ(); \
likely(!__range_not_ok(addr, size, user_addr_max())); \
@@ -189,19 +186,14 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#ifdef CONFIG_X86_32
-#define __put_user_asm_u64(x, addr, err, errret) \
- asm volatile("\n" \
- "1: movl %%eax,0(%2)\n" \
- "2: movl %%edx,4(%2)\n" \
- "3:" \
- ".section .fixup,\"ax\"\n" \
- "4: movl %3,%0\n" \
- " jmp 3b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(1b, 4b) \
- _ASM_EXTABLE_UA(2b, 4b) \
- : "=r" (err) \
- : "A" (x), "r" (addr), "i" (errret), "0" (err))
+#define __put_user_goto_u64(x, addr, label) \
+ asm_volatile_goto("\n" \
+ "1: movl %%eax,0(%1)\n" \
+ "2: movl %%edx,4(%1)\n" \
+ _ASM_EXTABLE_UA(1b, %l2) \
+ _ASM_EXTABLE_UA(2b, %l2) \
+ : : "A" (x), "r" (addr) \
+ : : label)
#define __put_user_asm_ex_u64(x, addr) \
asm volatile("\n" \
@@ -216,8 +208,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
-#define __put_user_asm_u64(x, ptr, retval, errret) \
- __put_user_asm(x, ptr, retval, "q", "", "er", errret)
+#define __put_user_goto_u64(x, ptr, label) \
+ __put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
@@ -278,23 +270,21 @@ extern void __put_user_8(void);
__builtin_expect(__ret_pu, 0); \
})
-#define __put_user_size(x, ptr, size, retval, errret) \
+#define __put_user_size(x, ptr, size, label) \
do { \
- retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
+ __put_user_goto(x, ptr, "b", "b", "iq", label); \
break; \
case 2: \
- __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
+ __put_user_goto(x, ptr, "w", "w", "ir", label); \
break; \
case 4: \
- __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
+ __put_user_goto(x, ptr, "l", "k", "ir", label); \
break; \
case 8: \
- __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
- errret); \
+ __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
break; \
default: \
__put_user_bad(); \
@@ -439,9 +429,12 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
- int __pu_err; \
+ __label__ __pu_label; \
+ int __pu_err = -EFAULT; \
__uaccess_begin(); \
- __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+ __put_user_size((x), (ptr), (size), __pu_label); \
+ __pu_err = 0; \
+__pu_label: \
__uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})
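
__label__ is the GNU C extension doing the work here: it declares __pu_label local to the statement expression, so repeated or nested expansions of the macro each get a private label. The same pattern in isolation, with store_or_fail() made up for illustration:

#define store_or_fail(x, p)			\
({						\
	__label__ out;				\
	int __ret = -EFAULT;			\
	if (!(p))				\
		goto out;			\
	*(p) = (x);				\
	__ret = 0;				\
out:						\
	__ret;					\
})

Because each expansion owns its out: label, two uses of store_or_fail() in the same function do not collide.
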
@@ -466,17 +459,23 @@ struct __large_struct { unsigned long buf[100]; };
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile("\n" \
- "1: mov"itype" %"rtype"1,%2\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
- : "=r"(err) \
- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+#define __put_user_goto(x, addr, itype, rtype, ltype, label) \
+ asm_volatile_goto("\n" \
+ "1: mov"itype" %"rtype"0,%1\n" \
+ _ASM_EXTABLE_UA(1b, %l2) \
+ : : ltype(x), "m" (__m(addr)) \
+ : : label)
+
+#define __put_user_failed(x, addr, itype, rtype, ltype, errret) \
+ ({ __label__ __puflab; \
+ int __pufret = errret; \
+ __put_user_goto(x,addr,itype,rtype,ltype,__puflab); \
+ __pufret = 0; \
+ __puflab: __pufret; })
+
+#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret) do { \
+ retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
+} while (0)
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
@@ -670,7 +669,7 @@ extern void __cmpxchg_wrong_size(void)
#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
({ \
- access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
+ access_ok((ptr), sizeof(*(ptr))) ? \
__user_atomic_cmpxchg_inatomic((uval), (ptr), \
(old), (new), sizeof(*(ptr))) : \
-EFAULT; \
@@ -708,16 +707,18 @@ extern struct movsl_mask {
* checking before using them, but you have to surround them with the
* user_access_begin/end() pair.
*/
-#define user_access_begin() __uaccess_begin()
+static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr,len)))
+ return 0;
+ __uaccess_begin();
+ return 1;
+}
+#define user_access_begin(a,b) user_access_begin(a,b)
#define user_access_end() __uaccess_end()
-#define unsafe_put_user(x, ptr, err_label) \
-do { \
- int __pu_err; \
- __typeof__(*(ptr)) __pu_val = (x); \
- __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
- if (unlikely(__pu_err)) goto err_label; \
-} while (0)
+#define unsafe_put_user(x, ptr, label) \
+ __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
#define unsafe_get_user(x, ptr, err_label) \
do { \
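
Taken together, the new API reads as follows: user_access_begin() now performs the access_ok() check itself, so callers must test its result, and unsafe_put_user() jumps to the caller's label on a fault instead of filling in an error variable. A sketch of the intended call pattern; write_pair() is a made-up example:

static int write_pair(u32 __user *p, u32 a, u32 b)
{
	if (!user_access_begin(p, 2 * sizeof(u32)))
		return -EFAULT;
	unsafe_put_user(a, p, efault);
	unsafe_put_user(b, p + 1, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}

Both stores share one fault label, which is exactly the multi-access batching that the label-based primitives were introduced to make cheap.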