path: root/arch/metag/include/asm/spinlock.h
author		James Hogan	2012-10-09 12:00:24 +0200
committer	James Hogan	2013-03-02 21:09:50 +0100
commit		6006c0d8ce9441dd1363bf14f18a8e28d3588460 (patch)
tree		786183053c89e11b3058b8a16f7953744b819340 /arch/metag/include/asm/spinlock.h
parent		metag: Module support (diff)
metag: Atomics, locks and bitops
Add header files to implement Meta hardware thread locks (used by some
other atomic operations), atomics, spinlocks, and bitops.

There are 2 main types of atomic primitives for metag (in addition to
IRQs off on UP):
 - LOCK instructions provide locking between hardware threads.
 - LNKGET/LNKSET instructions provide load-linked/store-conditional
   operations, allowing for lighter weight atomics on Meta2.

LOCK instructions allow hardware threads to acquire voluntary or
exclusive hardware thread locks:
 - LOCK0 releases the exclusive and voluntary locks from the running
   hardware thread.
 - LOCK1 acquires the voluntary hardware lock, blocking until it
   becomes available.
 - LOCK2 implies LOCK1, and additionally acquires the exclusive hardware
   lock, blocking all other hardware threads from executing.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
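
To illustrate the load-linked/store-conditional pattern that LNKGET/LNKSET
enable, here is a minimal C sketch of the usual retry loop. The
__lnkget()/__lnkset() helpers are hypothetical stand-ins for the inline
assembly the real metag headers use; they are not APIs introduced by this
patch.

/*
 * Illustrative sketch only: the general load-linked/store-conditional
 * retry loop that LNKGET/LNKSET make possible on Meta2.  The
 * __lnkget()/__lnkset() helpers are hypothetical stand-ins for the
 * inline assembly used by the real metag headers.
 */
static inline int atomic_add_return_sketch(int i, int *v)
{
	int result, fault;

	do {
		result = __lnkget(v);		/* load-linked: start the link */
		result += i;
		fault = __lnkset(v, result);	/* store-conditional: fails if the link was broken */
	} while (fault);			/* retry until the conditional store succeeds */

	return result;
}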
Diffstat (limited to 'arch/metag/include/asm/spinlock.h')
-rw-r--r--	arch/metag/include/asm/spinlock.h	22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
new file mode 100644
index 000000000000..86a7cf3d1386
--- /dev/null
+++ b/arch/metag/include/asm/spinlock.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifdef CONFIG_METAG_ATOMICITY_LOCK1
+#include <asm/spinlock_lock1.h>
+#else
+#include <asm/spinlock_lnkget.h>
+#endif
+
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
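
For context, a rough sketch of how a caller typically reaches the flags
variants defined above; the surrounding kernel plumbing is simplified and
the example function is not part of this patch.

/*
 * Simplified sketch (not from this patch) of combining interrupt
 * disabling with the arch_spin_lock_flags() macro defined above.
 * On metag the flags argument is ignored and the plain
 * arch_spin_lock() is used.
 */
static inline unsigned long example_spin_lock_irqsave(arch_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);			/* disable interrupts on this CPU */
	arch_spin_lock_flags(lock, flags);	/* expands to arch_spin_lock(lock) here */
	return flags;
}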