Diffstat (limited to 'include/linux/types.h')
-rw-r--r--  include/linux/types.h  30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/include/linux/types.h b/include/linux/types.h
index 23d237a075e2..c2a9eb44f2fa 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -8,7 +8,10 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
-
+#else
+#ifndef __EXPORTED_HEADERS__
+#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders"
+#endif /* __EXPORTED_HEADERS__ */
#endif
#include <linux/posix_types.h>
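
The hunk above makes the header warn when it is pulled in from user space without __EXPORTED_HEADERS__ defined. Below is a minimal standalone sketch, not part of the patch: the guard body is copied from the hunk, the file name warn_demo.c is made up. Compiling it with `gcc -c warn_demo.c` reproduces the warning, while `gcc -D__EXPORTED_HEADERS__ -c warn_demo.c` stays silent.

/* warn_demo.c - the same preprocessor guard as in the hunk above, in isolation */
#ifndef __KERNEL__
#ifndef __EXPORTED_HEADERS__
#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders"
#endif /* __EXPORTED_HEADERS__ */
#endif

int main(void) { return 0; }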
@@ -175,6 +178,19 @@ typedef __u64 __bitwise __be64;
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
+/*
+ * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
+ * common 32/64-bit compat problems.
+ * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
+ * architectures) and to 8-byte boundaries on 64-bit architectures. The new
+ * aligned_u64 type enforces 8-byte alignment so that structs containing
+ * aligned_u64 values have the same alignment on 32-bit and 64-bit architectures.
+ * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+ */
+#define __aligned_u64 __u64 __attribute__((aligned(8)))
+#define __aligned_be64 __be64 __attribute__((aligned(8)))
+#define __aligned_le64 __le64 __attribute__((aligned(8)))
+
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
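
A minimal user-space sketch, not part of the patch, of the compat problem the comment in the hunk above describes. The struct and file names are hypothetical; only the aligned(8) attribute usage mirrors the new __aligned_u64 definition.

/* abi_demo.c - compare a plain 64-bit ABI field with an 8-byte-aligned one */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* stand-in for the __aligned_u64 macro added above */
#define demo_aligned_u64 uint64_t __attribute__((aligned(8)))

struct ioctl_arg_plain {	/* layout differs between i386 and x86_64 */
	uint32_t flags;
	uint64_t cookie;
};

struct ioctl_arg_fixed {	/* identical layout on 32-bit and 64-bit builds */
	uint32_t flags;
	demo_aligned_u64 cookie;
};

int main(void)
{
	/* On x86_32 the plain cookie sits at offset 4 (struct size 12);
	 * on x86_64 it sits at offset 8 (size 16).  The fixed variant is
	 * offset 8 / size 16 on both, so a 64-bit kernel can accept the
	 * same structure from 32-bit and 64-bit callers without conversion. */
	printf("plain: offset %zu, size %zu\n",
	       offsetof(struct ioctl_arg_plain, cookie),
	       sizeof(struct ioctl_arg_plain));
	printf("fixed: offset %zu, size %zu\n",
	       offsetof(struct ioctl_arg_fixed, cookie),
	       sizeof(struct ioctl_arg_fixed));
	return 0;
}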
@@ -197,6 +213,18 @@ typedef struct {
} atomic64_t;
#endif
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
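
A minimal sketch, not part of the patch, of how the intrusive struct list_head moved into this header is meant to be used: the node is embedded in the caller's structure and the containing object is recovered with the usual container_of pointer arithmetic. All names below (struct item, list_demo.c, the local list_add_tail helper) are illustrative, not kernel API.

/* list_demo.c - embed a list_head in a structure and walk the list */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;		/* same shape as in the hunk above */
};

struct item {
	int value;
	struct list_head node;			/* embedded node, no separate allocation */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty circular list */
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct list_head *pos;

	list_add_tail(&a.node, &head);
	list_add_tail(&b.node, &head);

	for (pos = head.next; pos != &head; pos = pos->next)
		printf("%d\n", container_of(pos, struct item, node)->value);
	return 0;
}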
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;