Diffstat (limited to 'arch/sparc64/kernel/sys_sparc.c')
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c | 297
1 file changed, 263 insertions(+), 34 deletions(-)
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 5f8c822a2b4a..7a869138c37f 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -25,25 +25,93 @@
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
+#include <asm/a.out.h>
/* #define DEBUG_UNIMP_SYSCALL */
-/* XXX Make this per-binary type, this way we can detect the type of
- * XXX a binary. Every Sparc executable calls this very early on.
- */
asmlinkage unsigned long sys_getpagesize(void)
{
return PAGE_SIZE;
}
-#define COLOUR_ALIGN(addr,pgoff) \
- ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
- (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
+
+/* Does addr --> addr+len fall within 4GB of the VA-space hole or
+ * overflow past the end of the 64-bit address space?
+ */
+static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
+{
+ unsigned long va_exclude_start, va_exclude_end;
+
+ va_exclude_start = VA_EXCLUDE_START;
+ va_exclude_end = VA_EXCLUDE_END;
+
+ if (unlikely(len >= va_exclude_start))
+ return 1;
+
+ if (unlikely((addr + len) < addr))
+ return 1;
+
+ if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
+ ((addr + len) >= va_exclude_start &&
+ (addr + len) < va_exclude_end)))
+ return 1;
+
+ return 0;
+}
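+
+/* Worked example, using only the constants above: the guarded region
+ * is [0x000007ff00000000, 0xfffff80100000000), i.e. the hardware VA
+ * hole [0x0000080000000000, 0xfffff80000000000) widened by 4GB on
+ * each side.  So invalid_64bit_range(0x000007fe00000000UL, 1UL << 33)
+ * returns 1: addr itself sits below the guard band, but addr + len
+ * lands exactly on 0x0000080000000000, inside the excluded region.
+ */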
+
+/* Does start,end straddle the VA-space hole? */
+static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
+{
+ unsigned long va_exclude_start, va_exclude_end;
+
+ va_exclude_start = VA_EXCLUDE_START;
+ va_exclude_end = VA_EXCLUDE_END;
+
+ if (likely(start < va_exclude_start && end < va_exclude_start))
+ return 0;
+
+ if (likely(start >= va_exclude_end && end >= va_exclude_end))
+ return 0;
+
+ return 1;
+}
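+
+/* For example, with the constants above, a heap that grows from
+ * 0x000007fe00000000 to 0xfffff80200000000 straddles the hole and is
+ * therefore refused by sparc_brk() below, while growth that stays
+ * entirely below VA_EXCLUDE_START (or entirely above VA_EXCLUDE_END)
+ * is allowed.
+ */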
+
+/* These functions differ from the default implementations in
+ * mm/mmap.c in two ways:
+ *
+ * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
+ * for fixed such mappings we just validate what the user gave us.
+ * 2) For 64-bit tasks we avoid mapping anything within 4GB of
+ * the spitfire/niagara VA-hole.
+ */
+
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+ unsigned long pgoff)
+{
+ unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
+ unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
+
+ return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+ unsigned long pgoff)
+{
+ unsigned long base = addr & ~(SHMLBA-1);
+ unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
+
+ if (base + off <= addr)
+ return base + off;
+ return base - off;
+}
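+
+/* Illustration, assuming the usual sparc64 8KB pages (PAGE_SHIFT ==
+ * 13) and a 16KB SHMLBA: COLOUR_ALIGN(0x12345, 1) rounds 0x12345 up
+ * to the 16KB boundary 0x14000, then adds the colour offset
+ * (1 << 13) & 0x3fff == 0x2000, giving 0x16000.  Conversely,
+ * COLOUR_ALIGN_DOWN(0x16000, 1) computes base 0x14000 + off 0x2000
+ * == 0x16000 and returns it, since it does not exceed the original
+ * address.
+ */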
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
@@ -64,8 +132,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
}
if (test_thread_flag(TIF_32BIT))
- task_size = 0xf0000000UL;
- if (len > task_size || len > -PAGE_OFFSET)
+ task_size = STACK_TOP32;
+ if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
return -ENOMEM;
do_color_align = 0;
@@ -84,11 +152,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
return addr;
}
- if (len <= mm->cached_hole_size) {
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
+ start_addr = addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
- mm->free_area_cache = TASK_UNMAPPED_BASE;
}
- start_addr = addr = mm->free_area_cache;
task_size -= len;
@@ -100,11 +169,12 @@ full_search:
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
- if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
- addr = PAGE_OFFSET;
- vma = find_vma(mm, PAGE_OFFSET);
+ if (addr < VA_EXCLUDE_START &&
+ (addr + len) >= VA_EXCLUDE_START) {
+ addr = VA_EXCLUDE_END;
+ vma = find_vma(mm, VA_EXCLUDE_END);
}
- if (task_size < addr) {
+ if (unlikely(task_size < addr)) {
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
@@ -112,7 +182,7 @@ full_search:
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (likely(!vma || addr + len <= vma->vm_start)) {
/*
* Remember the place where we stopped the search:
*/
@@ -128,6 +198,121 @@ full_search:
}
}
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long task_size = STACK_TOP32;
+ unsigned long addr = addr0;
+ int do_color_align;
+
+ /* This should only ever run for 32-bit processes. */
+ BUG_ON(!test_thread_flag(TIF_32BIT));
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (unlikely(len > task_size))
+ return -ENOMEM;
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (task_size - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ /* check if free_area_cache is useful for us */
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = mm->mmap_base;
+ }
+
+ /* either no address requested or can't fit in requested address hole */
+ addr = mm->free_area_cache;
+ if (do_color_align) {
+ unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+ addr = base + len;
+ }
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+ if (!vma || addr <= vma->vm_start) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+ addr = mm->mmap_base-len;
+ if (do_color_align)
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+ do {
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+ if (likely(!vma || addr+len <= vma->vm_start)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+ if (do_color_align)
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ } while (likely(len < vma->vm_start));
+
+bottomup:
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ mm->cached_hole_size = ~0UL;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+}
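+
+/* Net effect for 32-bit tasks: the search starts at mm->free_area_cache
+ * (initially mm->mmap_base, set up just under the clamped stack gap in
+ * arch_pick_mmap_layout() below) and walks downward one VMA at a time,
+ * falling back to the legacy bottom-up allocator only when the
+ * top-down walk runs out of room.
+ */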
+
/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
@@ -171,15 +356,57 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
return addr;
}
+/* Essentially the same as PowerPC... */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE) {
+ random_factor = get_random_int();
+ if (test_thread_flag(TIF_32BIT))
+ random_factor &= ((1 * 1024 * 1024) - 1);
+ else
+ random_factor = ((random_factor << PAGE_SHIFT) &
+ 0xffffffffUL);
+ }
+
+ /*
+ * Fall back to the standard layout if the personality
+ * bit is set, or if the expected stack growth is unlimited:
+ */
+ if (!test_thread_flag(TIF_32BIT) ||
+ (current->personality & ADDR_COMPAT_LAYOUT) ||
+ current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ /* We know it's 32-bit */
+ unsigned long task_size = STACK_TOP32;
+ unsigned long gap;
+
+ gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+ if (gap < 128 * 1024 * 1024)
+ gap = 128 * 1024 * 1024;
+ if (gap > (task_size / 6 * 5))
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+}
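+
+/* Example with the common 8MB stack rlimit: gap is clamped up to
+ * 128MB, so a randomized 32-bit task gets mmap_base ==
+ * PAGE_ALIGN(0xf0000000 - 0x08000000 - random_factor), i.e. just
+ * under 0xe8000000, together with the top-down allocator above.
+ * 64-bit tasks always keep the legacy bottom-up layout.
+ */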
+
asmlinkage unsigned long sparc_brk(unsigned long brk)
{
/* People could try to be nasty and use ta 0x6d in 32bit programs */
- if (test_thread_flag(TIF_32BIT) &&
- brk >= 0xf0000000UL)
+ if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
return current->mm->brk;
- if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
+ if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
return current->mm->brk;
+
return sys_brk(brk);
}
@@ -340,13 +567,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
retval = -EINVAL;
if (test_thread_flag(TIF_32BIT)) {
- if (len > 0xf0000000UL ||
- ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
+ if (len >= STACK_TOP32)
+ goto out_putf;
+
+ if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
goto out_putf;
} else {
- if (len > -PAGE_OFFSET ||
- ((flags & MAP_FIXED) &&
- addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+ if (len >= VA_EXCLUDE_START)
+ goto out_putf;
+
+ if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
goto out_putf;
}
@@ -365,9 +595,9 @@ asmlinkage long sys64_munmap(unsigned long addr, size_t len)
{
long ret;
- if (len > -PAGE_OFFSET ||
- (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+ if (invalid_64bit_range(addr, len))
return -EINVAL;
+
down_write(&current->mm->mmap_sem);
ret = do_munmap(current->mm, addr, len);
up_write(&current->mm->mmap_sem);
@@ -384,18 +614,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
{
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
+
if (test_thread_flag(TIF_32BIT))
goto out;
- if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
+ if (unlikely(new_len >= VA_EXCLUDE_START))
goto out;
- if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
+ if (unlikely(invalid_64bit_range(addr, old_len)))
goto out;
+
down_write(&current->mm->mmap_sem);
if (flags & MREMAP_FIXED) {
- if (new_addr < PAGE_OFFSET &&
- new_addr + new_len > -PAGE_OFFSET)
+ if (invalid_64bit_range(new_addr, new_len))
goto out_sem;
- } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
+ } else if (invalid_64bit_range(addr, new_len)) {
unsigned long map_flags = 0;
struct file *file = NULL;
@@ -554,12 +785,10 @@ asmlinkage long sys_utrap_install(utrap_entry_t type,
}
if (!current_thread_info()->utraps) {
current_thread_info()->utraps =
- kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
+ kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
if (!current_thread_info()->utraps)
return -ENOMEM;
current_thread_info()->utraps[0] = 1;
- memset(current_thread_info()->utraps+1, 0,
- UT_TRAP_INSTRUCTION_31*sizeof(long));
} else {
if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
current_thread_info()->utraps[0] > 1) {