Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/a.out.h      |  2
-rw-r--r--  include/asm-x86_64/agp.h        | 10
-rw-r--r--  include/asm-x86_64/page.h       |  1
-rw-r--r--  include/asm-x86_64/pgtable.h    |  3
-rw-r--r--  include/asm-x86_64/processor.h  | 11
-rw-r--r--  include/asm-x86_64/smp.h        |  2
6 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h
index 5952914f4121..7255cde06538 100644
--- a/include/asm-x86_64/a.out.h
+++ b/include/asm-x86_64/a.out.h
@@ -21,7 +21,7 @@ struct exec
#ifdef __KERNEL__
#include <linux/thread_info.h>
-#define STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
+#define STACK_TOP TASK_SIZE
#endif
#endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h
index 0bb9019d58aa..06c52ee9c06b 100644
--- a/include/asm-x86_64/agp.h
+++ b/include/asm-x86_64/agp.h
@@ -19,4 +19,14 @@ int unmap_page_from_agp(struct page *page);
worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f43048035a03..9ce338c3a71e 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -28,6 +28,7 @@
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define ARCH_HAS_HUGETLB_CLEAN_STALE_PGTABLE
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index db2a0efbf573..4eec176c3c39 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -253,6 +253,7 @@ extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -263,6 +264,7 @@ extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
+extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
struct vm_area_struct;
@@ -290,7 +292,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
*/
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pmd_large(pmd_t pte) {
return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
}
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index d641b19f6da5..8b55f139968f 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -160,16 +160,17 @@ static inline void clear_in_cr4 (unsigned long mask)
/*
* User space process size. 47bits minus one guard page.
*/
-#define TASK_SIZE (0x800000000000UL - 4096)
+#define TASK_SIZE64 (0x800000000000UL - 4096)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
-#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
-#define TASK_UNMAPPED_BASE \
- (test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
+
+#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
+
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
/*
* Size of io_bitmap.
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 96844fecbde8..a7425aa5a3b7 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -68,7 +68,7 @@ static inline int num_booting_cpus(void)
return cpus_weight(cpu_callout_map);
}
-#define __smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() read_pda(cpunumber)
extern __inline int hard_smp_processor_id(void)
{