Diffstat (limited to 'arch/mips/include/asm/irq.h')

 arch/mips/include/asm/irq.h | 64 ++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 31 deletions(-)
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index b003ed52ed17..0ec01294b063 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -55,9 +55,9 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 #include <linux/cpumask.h>
 
-extern int plat_set_irq_affinity(unsigned int irq,
-				 const struct cpumask *affinity);
-extern void smtc_forward_irq(unsigned int irq);
+extern int plat_set_irq_affinity(struct irq_data *d,
+				 const struct cpumask *affinity, bool force);
+extern void smtc_forward_irq(struct irq_data *d);
 
 /*
  * IRQ affinity hook invoked at the beginning of interrupt dispatch
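
With this change, plat_set_irq_affinity() takes the form of an irq_chip .irq_set_affinity callback: it receives the struct irq_data directly instead of a bare IRQ number, plus the force flag. As a rough sketch (hypothetical code, not the actual platform implementation from this series; the 0-on-success return convention is assumed), a platform hook under the new prototype could look like:

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/*
 * Hypothetical sketch only: a minimal plat_set_irq_affinity() for the
 * new struct irq_data based prototype. A real SMTC platform would also
 * restrict the mask to the TCs that may service this interrupt.
 */
int plat_set_irq_affinity(struct irq_data *d,
			  const struct cpumask *affinity, bool force)
{
	if (!cpumask_intersects(affinity, cpu_online_mask))
		return -EINVAL;

	/* Record the mask so the dispatch-time affinity test can use it. */
	cpumask_copy(d->affinity, affinity);
	return 0;
}
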
@@ -70,51 +70,53 @@ extern void smtc_forward_irq(unsigned int irq);
  * cpumask implementations, this version is optimistically assuming
  * that cpumask.h macro overhead is reasonable during interrupt dispatch.
  */
-#define IRQ_AFFINITY_HOOK(irq)						\
-do {									\
-	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
-		smtc_forward_irq(irq);					\
-		irq_exit();						\
-		return;							\
-	}								\
-} while (0)
+static inline int handle_on_other_cpu(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+
+	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
+		return 0;
+	smtc_forward_irq(d);
+	return 1;
+}
 
 #else /* Not doing SMTC affinity */
 
-#define IRQ_AFFINITY_HOOK(irq) do { } while (0)
+static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 
+static inline void smtc_im_backstop(unsigned int irq)
+{
+	if (irq_hwmask[irq] & 0x0000ff00)
+		write_c0_tccontext(read_c0_tccontext() &
+				   ~(irq_hwmask[irq] & 0x0000ff00));
+}
+
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
  * entry so indicates. This implies that the ack() or end()
  * functions will take over re-enabling the low-level mask.
  * Otherwise it will be done on return from exception.
  */
-#define __DO_IRQ_SMTC_HOOK(irq)						\
-do {									\
-	IRQ_AFFINITY_HOOK(irq);						\
-	if (irq_hwmask[irq] & 0x0000ff00)				\
-		write_c0_tccontext(read_c0_tccontext() &		\
-				   ~(irq_hwmask[irq] & 0x0000ff00));	\
-} while (0)
-
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)				\
-do {									\
-	if (irq_hwmask[irq] & 0x0000ff00)				\
-		write_c0_tccontext(read_c0_tccontext() &		\
-				   ~(irq_hwmask[irq] & 0x0000ff00));	\
-} while (0)
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+	int ret = handle_on_other_cpu(irq);
+
+	if (!ret)
+		smtc_im_backstop(irq);
+	return ret;
+}
 
 #else
 
-#define __DO_IRQ_SMTC_HOOK(irq)						\
-do {									\
-	IRQ_AFFINITY_HOOK(irq);						\
-} while (0)
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)
+static inline void smtc_im_backstop(unsigned int irq) { }
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+	return handle_on_other_cpu(irq);
+}
 
 #endif
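
Call sites move from the removed __DO_IRQ_SMTC_HOOK()/__NO_AFFINITY_IRQ_SMTC_HOOK() macros to the new inline helpers. A sketch of how a dispatch path would use them (illustrative; the function below is a placeholder for do_IRQ()-style dispatch code, not something added by this header):

/*
 * Illustrative only: dispatch path on top of the new helpers. If this
 * CPU is not in the IRQ's affinity mask, smtc_handle_on_other_cpu()
 * forwards the interrupt and returns 1; otherwise it clears the IM
 * backstop and returns 0 so the handler runs here.
 */
static void dispatch_irq(unsigned int irq)
{
	irq_enter();

	if (!smtc_handle_on_other_cpu(irq))
		generic_handle_irq(irq);

	irq_exit();
}

Unlike the old IRQ_AFFINITY_HOOK() macro, which hid an irq_exit() and a return inside its expansion, the inline helper reports forwarding through its return value and leaves control flow to the caller; being real functions, the helpers are also type-checked in both configurations.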