Diffstat (limited to 'include/asm-h8300/system.h')
-rw-r--r--	include/asm-h8300/system.h	149
1 files changed, 149 insertions, 0 deletions
diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h
new file mode 100644
index 000000000000..dfe96c7121cf
--- /dev/null
+++ b/include/asm-h8300/system.h
@@ -0,0 +1,149 @@
+#ifndef _H8300_SYSTEM_H
+#define _H8300_SYSTEM_H
+
+#include <linux/config.h> /* get configuration macros */
+#include <linux/linkage.h>
+
+#define prepare_to_switch() do { } while(0)
+
+/*
+ * switch_to(prev, next, last) should switch to the task pointed to
+ * by next, first checking that next isn't the current task, in which
+ * case it does nothing. This also clears the TS flag if the task we
+ * switched to was the last to use the math co-processor.
+ */
+/*
+ * switch_to() saves the extra registers that are not saved
+ * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
+ * a0-a1. Some of these are used by schedule() and its predecessors,
+ * so we might see unexpected behavior if a task returns with
+ * unexpected register values.
+ *
+ * syscall saves these registers itself, and none of them are used
+ * by syscall after the function inside the syscall has been called.
+ *
+ * Beware that resume now expects *next to be in d1 and the offset of
+ * tss to be in a1. This saves a few instructions as we no longer have
+ * to push them onto the stack and read them back right after.
+ *
+ * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
+ *
+ * Changed 96/09/19 by Andreas Schwab
+ * pass prev in a0, next in a1, offset of tss in d1, and whether
+ * the mm structures are shared in d2 (to avoid atc flushing).
+ *
+ * H8/300 Porting 2002/09/04 Yoshinori Sato
+ */
+
+asmlinkage void resume(void);
+#define switch_to(prev,next,last) { \
+ void *_last; \
+ __asm__ __volatile__( \
+ "mov.l %1, er0\n\t" \
+ "mov.l %2, er1\n\t" \
+ "mov.l %3, er2\n\t" \
+ "jsr @_resume\n\t" \
+ "mov.l er2,%0\n\t" \
+ : "=r" (_last) \
+ : "r" (&(prev->thread)), \
+ "r" (&(next->thread)), \
+ "g" (prev) \
+ : "cc", "er0", "er1", "er2", "er3"); \
+ (last) = _last; \
+}
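
For context, the generic scheduler core is what invokes this macro
during a context switch. The following is a simplified, hypothetical
sketch of the caller's side (not part of this patch, and it assumes
the kernel's struct task_struct is in scope):

	static void context_switch_sketch(struct task_struct *prev,
					  struct task_struct *next)
	{
		struct task_struct *last;

		switch_to(prev, next, last);
		/* switch_to() "returns" only when this task is scheduled
		 * back in; at that point last names the task we came from. */
	}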
+
+#define __sti() asm volatile ("andc #0x7f,ccr") /* clear the I bit: enable interrupts */
+#define __cli() asm volatile ("orc #0x80,ccr") /* set the I bit: disable interrupts */
+
+#define __save_flags(x) \
+ asm volatile ("stc ccr,%w0":"=r" (x))
+
+#define __restore_flags(x) \
+ asm volatile ("ldc %w0,ccr": :"r" (x))
+
+#define irqs_disabled() \
+({ \
+ unsigned char flags; \
+ __save_flags(flags); \
+ ((flags & 0x80) == 0x80); \
+})
+
+#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
+
+/* For spinlocks etc */
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+#define local_irq_save(x) ({ __save_flags(x); local_irq_disable(); })
+#define local_irq_restore(x) __restore_flags(x)
+#define local_save_flags(x) __save_flags(x)
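
These are the local-IRQ primitives for this port. A minimal sketch of
the usual save/modify/restore pattern, with a hypothetical counter
standing in for real shared state (none of these names are in the
header):

	static unsigned long hypothetical_event_count;

	static void count_event(void)
	{
		unsigned char flags;

		local_irq_save(flags);		/* save CCR, then set the I bit */
		hypothetical_event_count++;	/* cannot race with an IRQ handler */
		local_irq_restore(flags);	/* put the saved CCR back */
	}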
+
+/*
+ * Force strict CPU ordering.
+ * Not really required on H8...
+ */
+#define nop() asm volatile ("nop"::)
+#define mb() asm volatile ("" : : :"memory")
+#define rmb() asm volatile ("" : : :"memory")
+#define wmb() asm volatile ("" : : :"memory")
+#define set_rmb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) set_rmb(var, value)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#endif
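
Even though all of these collapse to compiler barriers on this
uniprocessor port, the intended usage is the usual publish/consume
ordering. A hypothetical sketch (the variables are illustrative only):

	static int hypothetical_data;
	static volatile int hypothetical_ready;

	static void publish(int value)
	{
		hypothetical_data = value;
		wmb();			/* order the data store before the flag */
		hypothetical_ready = 1;
	}

	static int consume(void)
	{
		while (!hypothetical_ready)
			;		/* wait for the producer's flag */
		rmb();			/* order the flag read before the data read */
		return hypothetical_data;
	}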
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
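+/* The dummy struct makes the "m" constraint in __xchg() refer to the
+ * pointed-to memory itself, so gcc knows the asm reads and writes
+ * that memory and not just the pointer value. */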
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
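+/* Uniprocessor-only exchange: atomicity comes from masking interrupts
+ * around a plain load/store pair of the requested width. */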
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ unsigned long tmp, flags;
+
+ local_irq_save(flags);
+
+ switch (size) {
+ case 1:
+ __asm__ __volatile__
+ ("mov.b %2,%0\n\t"
+ "mov.b %1,%2"
+ : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__
+ ("mov.w %2,%0\n\t"
+ "mov.w %1,%2"
+ : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__
+ ("mov.l %2,%0\n\t"
+ "mov.l %1,%2"
+ : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ default:
+ tmp = 0; /* unsupported size; nothing sensible to return */
+ }
+ local_irq_restore(flags);
+ return tmp;
+}
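
Because __xchg() masks interrupts, xchg() and tas() are atomic with
respect to interrupt handlers on this UP target, which is enough to
build a simple test-and-set lock. A hypothetical sketch (none of these
names exist in the header):

	static volatile unsigned char hypothetical_lock;

	static void hypothetical_lock_acquire(void)
	{
		/* tas() stores 1 and returns the old value; 0 means the
		 * lock was free and now belongs to us. */
		while (tas(&hypothetical_lock))
			;	/* busy-wait until released */
	}

	static void hypothetical_lock_release(void)
	{
		hypothetical_lock = 0;
	}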
+
+#define HARD_RESET_NOW() ({ \
+ local_irq_disable(); \
+ asm("jmp @@0"); \
+})
+
+#define arch_align_stack(x) (x)
+
+#endif /* _H8300_SYSTEM_H */