Diffstat (limited to 'drivers/staging/tidspbridge/core')
-rw-r--r--  drivers/staging/tidspbridge/core/_cmm.h            |   45
-rw-r--r--  drivers/staging/tidspbridge/core/_deh.h            |   34
-rw-r--r--  drivers/staging/tidspbridge/core/_msg_sm.h         |  142
-rw-r--r--  drivers/staging/tidspbridge/core/_tiomap.h         |  382
-rw-r--r--  drivers/staging/tidspbridge/core/_tiomap_pwr.h     |   85
-rw-r--r--  drivers/staging/tidspbridge/core/chnl_sm.c         | 1013
-rw-r--r--  drivers/staging/tidspbridge/core/dsp-clock.c       |  421
-rw-r--r--  drivers/staging/tidspbridge/core/dsp-mmu.c         |  317
-rw-r--r--  drivers/staging/tidspbridge/core/io_sm.c           | 2180
-rw-r--r--  drivers/staging/tidspbridge/core/msg_sm.c          |  673
-rw-r--r--  drivers/staging/tidspbridge/core/sync.c            |  121
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c      |  948
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430_pwr.c  |  550
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap_io.c       |  455
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap_io.h       |  104
-rw-r--r--  drivers/staging/tidspbridge/core/ue_deh.c          |  160
-rw-r--r--  drivers/staging/tidspbridge/core/wdt.c             |  150
17 files changed, 7780 insertions, 0 deletions
diff --git a/drivers/staging/tidspbridge/core/_cmm.h b/drivers/staging/tidspbridge/core/_cmm.h
new file mode 100644
index 000000000000..7660bef6ebb3
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_cmm.h
@@ -0,0 +1,45 @@
+/*
+ * _cmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header file defining CMM manager objects and defines needed
+ * by IO manager to register shared memory regions when DSP base image
+ * is loaded (bridge_io_on_loaded).
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _CMM_
+#define _CMM_
+
+/*
+ * These target side symbols define the beginning and ending addresses
+ * of the section of shared memory used for shared memory manager CMM.
+ * They are defined in the *cfg.cmd file by cdb code.
+ */
+#define SHM0_SHARED_BASE_SYM "_SHM0_BEG"
+#define SHM0_SHARED_END_SYM "_SHM0_END"
+#define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT"
+
+/*
+ * Shared Memory Region #0 (SHMSEG0) is used in the following way:
+ *
+ * |(_SHM0_BEG) | (_SHM0_RSVDSTRT) | (_SHM0_END)
+ * V V V
+ * ------------------------------------------------------------
+ * | DSP-side allocations | GPP-side allocations |
+ * ------------------------------------------------------------
+ *
+ *
+ */
+
+#endif /* _CMM_ */
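[Editor's note] The SHM0_* symbols above are resolved from the loaded DSP
image when the IO manager registers the shared-memory region. A minimal
sketch of that lookup, assuming the dev_get_symbol() helper declared in
<dspbridge/dev.h>:

	u32 shm0_base, shm0_end, shm0_rsvd;
	int status;

	/* Resolve the target-side symbols once the base image is loaded. */
	status = dev_get_symbol(hdev_obj, SHM0_SHARED_BASE_SYM, &shm0_base);
	if (!status)
		status = dev_get_symbol(hdev_obj, SHM0_SHARED_END_SYM,
					&shm0_end);
	if (!status)
		status = dev_get_symbol(hdev_obj,
					SHM0_SHARED_RESERVED_BASE_SYM,
					&shm0_rsvd);
	/* DSP-side allocations: [shm0_base, shm0_rsvd);
	 * GPP-side allocations: [shm0_rsvd, shm0_end) */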
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
new file mode 100644
index 000000000000..8ae263387a87
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -0,0 +1,34 @@
+/*
+ * _deh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header for DEH module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ * Copyright (C) 2010 Felipe Contreras
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _DEH_
+#define _DEH_
+
+#include <dspbridge/ntfy.h>
+#include <dspbridge/dspdefs.h>
+
+/* DEH Manager: only one created per board: */
+struct deh_mgr {
+ struct bridge_dev_context *hbridge_context; /* Bridge context. */
+ struct ntfy_object *ntfy_obj; /* NTFY object */
+};
+
+int mmu_fault_isr(struct iommu *mmu);
+
+#endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_msg_sm.h b/drivers/staging/tidspbridge/core/_msg_sm.h
new file mode 100644
index 000000000000..556de5c025dd
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_msg_sm.h
@@ -0,0 +1,142 @@
+/*
+ * _msg_sm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header file defining msg_ctrl manager objects and defines needed
+ * by IO manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MSG_SM_
+#define _MSG_SM_
+
+#include <dspbridge/list.h>
+#include <dspbridge/msgdefs.h>
+
+/*
+ * These target side symbols define the beginning and ending addresses
+ * of the section of shared memory used for messages. They are
+ * defined in the *cfg.cmd file by cdb code.
+ */
+#define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG"
+#define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END"
+
+#ifndef _CHNL_WORDSIZE
+#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 4 bytes/word */
+#endif
+
+/*
+ * ======== msg_ctrl ========
+ * There is a control structure for messages to the DSP, and a control
+ * structure for messages from the DSP. The shared memory region for
+ * transferring messages is partitioned as follows:
+ *
+ * ----------------------------------------------------------
+ * |Control | Messages from DSP | Control | Messages to DSP |
+ * ----------------------------------------------------------
+ *
+ * The msg_ctrl control structure for messages to the DSP is used in the
+ * following way:
+ *
+ * buf_empty - This flag is set to FALSE by the GPP after it has output
+ * messages for the DSP. The DSP host driver sets it to
+ * TRUE after it has copied the messages.
+ * post_swi - Set to 1 by the GPP after it has written the messages,
+ * set the size, and set buf_empty to FALSE.
+ * The DSP Host driver uses SWI_andn of the post_swi field
+ * when a host interrupt occurs. The host driver clears
+ * this after posting the SWI.
+ * size - Number of messages to be read by the DSP.
+ *
+ * For messages from the DSP:
+ * buf_empty - This flag is set to FALSE by the DSP after it has output
+ * messages for the GPP. The DPC on the GPP sets it to
+ * TRUE after it has copied the messages.
+ * post_swi - Set to 1 by the DPC on the GPP after copying the messages.
+ * size - Number of messages to be read by the GPP.
+ */
+struct msg_ctrl {
+ u32 buf_empty; /* to/from DSP buffer is empty */
+ u32 post_swi; /* Set to "1" to post msg_ctrl SWI */
+ u32 size; /* Number of messages to/from the DSP */
+ u32 resvd;
+};
+
+/*
+ * ======== msg_mgr ========
+ * The msg_mgr maintains a list of all MSG_QUEUEs. Each NODE object can
+ * have msg_queue to hold all messages that come up from the corresponding
+ * node on the DSP. The msg_mgr also has a shared queue of messages
+ * ready to go to the DSP.
+ */
+struct msg_mgr {
+ /* The first field must match that in msgobj.h */
+
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+
+ struct io_mgr *hio_mgr; /* IO manager */
+ struct lst_list *queue_list; /* List of MSG_QUEUEs */
+ spinlock_t msg_mgr_lock; /* For critical sections */
+ /* Signalled when MsgFrame is available */
+ struct sync_object *sync_event;
+ struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
+ struct lst_list *msg_used_list; /* MsgFrames ready to go to DSP */
+ u32 msgs_pending; /* # of queued messages to go to DSP */
+ u32 max_msgs; /* Max # of msgs that fit in buffer */
+ msg_onexit on_exit; /* called when RMS_EXIT is received */
+};
+
+/*
+ * ======== msg_queue ========
+ * Each NODE has a msg_queue for receiving messages from the
+ * corresponding node on the DSP. The msg_queue object maintains a list
+ * of messages that have been sent to the host, but not yet read (MSG_Get),
+ * and a list of free frames that can be filled when new messages arrive
+ * from the DSP.
+ * The msg_queue's sync_event gets posted when a message is ready.
+ */
+struct msg_queue {
+ struct list_head list_elem;
+ struct msg_mgr *hmsg_mgr;
+ u32 max_msgs; /* Node message depth */
+ u32 msgq_id; /* Node environment pointer */
+ struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
+ /* Filled MsgFrames waiting to be read */
+ struct lst_list *msg_used_list;
+ void *arg; /* Handle passed to mgr on_exit callback */
+ struct sync_object *sync_event; /* Signalled when message is ready */
+ struct sync_object *sync_done; /* For synchronizing cleanup */
+ struct sync_object *sync_done_ack; /* For synchronizing cleanup */
+ struct ntfy_object *ntfy_obj; /* For notification of message ready */
+ bool done; /* TRUE <==> deleting the object */
+ u32 io_msg_pend; /* Number of pending MSG_get/put calls */
+};
+
+/*
+ * ======== msg_dspmsg ========
+ */
+struct msg_dspmsg {
+ struct dsp_msg msg;
+ u32 msgq_id; /* Identifies the node the message goes to */
+};
+
+/*
+ * ======== msg_frame ========
+ */
+struct msg_frame {
+ struct list_head list_elem;
+ struct msg_dspmsg msg_data;
+};
+
+#endif /* _MSG_SM_ */
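[Editor's note] A minimal sketch of the GPP-side output sequence implied by
the msg_ctrl comment above (messages written first, then size set, then
buf_empty cleared, then the SWI posted); the iomem accessors and the helper
name gpp_post_messages() are assumptions of this sketch:

	static void gpp_post_messages(struct msg_ctrl __iomem *ctl, u32 n_msgs)
	{
		/* messages were already copied into the shared buffer */
		writel(n_msgs, &ctl->size);	/* how many the DSP must read */
		writel(0, &ctl->buf_empty);	/* buf_empty = FALSE */
		writel(1, &ctl->post_swi);	/* post the SWI last */
		/* then raise the mailbox interrupt, e.g. sm_interrupt_dsp() */
	}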
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
new file mode 100644
index 000000000000..e0a801c1cb98
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -0,0 +1,382 @@
+/*
+ * _tiomap.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions and types private to this Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_
+#define _TIOMAP_
+
+#include <plat/powerdomain.h>
+#include <plat/clockdomain.h>
+#include <mach-omap2/prm-regbits-34xx.h>
+#include <mach-omap2/cm-regbits-34xx.h>
+#include <dspbridge/dsp-mmu.h>
+#include <dspbridge/devdefs.h>
+#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
+#include <dspbridge/sync.h>
+#include <dspbridge/clk.h>
+
+struct map_l4_peripheral {
+ u32 phys_addr;
+ u32 dsp_virt_addr;
+};
+
+#define ARM_MAILBOX_START 0xfffcf000
+#define ARM_MAILBOX_LENGTH 0x800
+
+/* New Registers in OMAP3.1 */
+
+#define TESTBLOCK_ID_START 0xfffed400
+#define TESTBLOCK_ID_LENGTH 0xff
+
+/* ID Returned by OMAP1510 */
+#define TBC_ID_VALUE 0xB47002F
+
+#define SPACE_LENGTH 0x2000
+#define API_CLKM_DPLL_DMA 0xfffec000
+#define ARM_INTERRUPT_OFFSET 0xb00
+
+#define BIOS24XX
+
+#define L4_PERIPHERAL_NULL 0x0
+#define DSPVA_PERIPHERAL_NULL 0x0
+
+#define MAX_LOCK_TLB_ENTRIES 15
+
+#define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */
+#define DSPVA_PERIPHERAL_PRM 0x1181e000
+#define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */
+#define DSPVA_PERIPHERAL_SCM 0x1181f000
+#define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */
+#define DSPVA_PERIPHERAL_MMU 0x11820000
+#define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */
+#define DSPVA_PERIPHERAL_CM 0x1181c000
+#define L4_PERIPHERAL_PER 0x48005000 /* PER */
+#define DSPVA_PERIPHERAL_PER 0x1181d000
+
+#define L4_PERIPHERAL_GPIO1 0x48310000
+#define DSPVA_PERIPHERAL_GPIO1 0x11809000
+#define L4_PERIPHERAL_GPIO2 0x49050000
+#define DSPVA_PERIPHERAL_GPIO2 0x1180a000
+#define L4_PERIPHERAL_GPIO3 0x49052000
+#define DSPVA_PERIPHERAL_GPIO3 0x1180b000
+#define L4_PERIPHERAL_GPIO4 0x49054000
+#define DSPVA_PERIPHERAL_GPIO4 0x1180c000
+#define L4_PERIPHERAL_GPIO5 0x49056000
+#define DSPVA_PERIPHERAL_GPIO5 0x1180d000
+
+#define L4_PERIPHERAL_IVA2WDT 0x49030000
+#define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000
+
+#define L4_PERIPHERAL_DISPLAY 0x48050000
+#define DSPVA_PERIPHERAL_DISPLAY 0x1180f000
+
+#define L4_PERIPHERAL_SSI 0x48058000
+#define DSPVA_PERIPHERAL_SSI 0x11804000
+#define L4_PERIPHERAL_GDD 0x48059000
+#define DSPVA_PERIPHERAL_GDD 0x11805000
+#define L4_PERIPHERAL_SS1 0x4805a000
+#define DSPVA_PERIPHERAL_SS1 0x11806000
+#define L4_PERIPHERAL_SS2 0x4805b000
+#define DSPVA_PERIPHERAL_SS2 0x11807000
+
+#define L4_PERIPHERAL_CAMERA 0x480BC000
+#define DSPVA_PERIPHERAL_CAMERA 0x11819000
+
+#define L4_PERIPHERAL_SDMA 0x48056000
+#define DSPVA_PERIPHERAL_SDMA 0x11810000 /* 0x1181d000 conflict w/ PER */
+
+#define L4_PERIPHERAL_UART1 0x4806a000
+#define DSPVA_PERIPHERAL_UART1 0x11811000
+#define L4_PERIPHERAL_UART2 0x4806c000
+#define DSPVA_PERIPHERAL_UART2 0x11812000
+#define L4_PERIPHERAL_UART3 0x49020000
+#define DSPVA_PERIPHERAL_UART3 0x11813000
+
+#define L4_PERIPHERAL_MCBSP1 0x48074000
+#define DSPVA_PERIPHERAL_MCBSP1 0x11814000
+#define L4_PERIPHERAL_MCBSP2 0x49022000
+#define DSPVA_PERIPHERAL_MCBSP2 0x11815000
+#define L4_PERIPHERAL_MCBSP3 0x49024000
+#define DSPVA_PERIPHERAL_MCBSP3 0x11816000
+#define L4_PERIPHERAL_MCBSP4 0x49026000
+#define DSPVA_PERIPHERAL_MCBSP4 0x11817000
+#define L4_PERIPHERAL_MCBSP5 0x48096000
+#define DSPVA_PERIPHERAL_MCBSP5 0x11818000
+
+#define L4_PERIPHERAL_GPTIMER5 0x49038000
+#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000
+#define L4_PERIPHERAL_GPTIMER6 0x4903a000
+#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000
+#define L4_PERIPHERAL_GPTIMER7 0x4903c000
+#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000
+#define L4_PERIPHERAL_GPTIMER8 0x4903e000
+#define DSPVA_PERIPHERAL_GPTIMER8 0x11803000
+
+#define L4_PERIPHERAL_SPI1 0x48098000
+#define DSPVA_PERIPHERAL_SPI1 0x1181a000
+#define L4_PERIPHERAL_SPI2 0x4809a000
+#define DSPVA_PERIPHERAL_SPI2 0x1181b000
+
+#define L4_PERIPHERAL_MBOX 0x48094000
+#define DSPVA_PERIPHERAL_MBOX 0x11808000
+
+#define PM_GRPSEL_BASE 0x48307000
+#define DSPVA_GRPSEL_BASE 0x11821000
+
+#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000
+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000
+#define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000
+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000
+
+/* define a static array with L4 mappings */
+static const struct map_l4_peripheral l4_peripheral_table[] = {
+ {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX},
+ {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM},
+ {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU},
+ {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5},
+ {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6},
+ {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7},
+ {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8},
+ {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1},
+ {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2},
+ {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3},
+ {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4},
+ {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5},
+ {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT},
+ {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY},
+ {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI},
+ {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD},
+ {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1},
+ {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2},
+ {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1},
+ {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2},
+ {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3},
+ {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1},
+ {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2},
+ {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3},
+ {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4},
+ {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5},
+ {L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA},
+ {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1},
+ {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2},
+ {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM},
+ {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM},
+ {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER},
+ {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE},
+ {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2},
+ {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3},
+ {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL}
+};
+
+/*
+ * 15 10 0
+ * ---------------------------------
+ * |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i|
+ * ---------------------------------
+ * | (class) | (module specific) |
+ *
+ * where c -> External Clock Command: Clk & Autoidle Disable/Enable
+ * i -> External Clock ID: Timers 5,6,7,8, McBSP1,2 and WDT3
+ */
+
+/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */
+#define MBX_PM_CLK_IDMASK 0x7F
+
+/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */
+#define MBX_PM_CLK_CMDSHIFT 7
+
+/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */
+#define MBX_PM_CLK_CMDMASK 7
+
+/* MBX_CORE1_RESOURCES: CORE 1 clock resources. */
+#define MBX_CORE1_RESOURCES 7
+
+/* MBX_CORE2_RESOURCES: CORE 2 clock resources. */
+#define MBX_CORE2_RESOURCES 1
+
+/* MBX_PM_MAX_RESOURCES: Total clock resources. */
+#define MBX_PM_MAX_RESOURCES 11
+
+/* Power Management Commands */
+#define BPWR_DISABLE_CLOCK 0
+#define BPWR_ENABLE_CLOCK 1
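+
+/*
+ * Illustrative helper (hypothetical, not part of the driver): composing
+ * the clock-control mailbox word from the fields above; the command sits
+ * in bits 9:7 and the clock id in bits 6:0, e.g. BPWR_ENABLE_CLOCK with
+ * an id from enum bpwr_ext_clock_id below.
+ */
+static inline u16 bpwr_compose_clk_cmd(u16 cmd, u16 clk_id)
+{
+	return ((cmd & MBX_PM_CLK_CMDMASK) << MBX_PM_CLK_CMDSHIFT) |
+	       (clk_id & MBX_PM_CLK_IDMASK);
+}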
+
+/* OMAP242x specific resources */
+enum bpwr_ext_clock_id {
+ BPWR_GP_TIMER5 = 0x10,
+ BPWR_GP_TIMER6,
+ BPWR_GP_TIMER7,
+ BPWR_GP_TIMER8,
+ BPWR_WD_TIMER3,
+ BPWR_MCBSP1,
+ BPWR_MCBSP2,
+ BPWR_MCBSP3,
+ BPWR_MCBSP4,
+ BPWR_MCBSP5,
+ BPWR_SSI = 0x20
+};
+
+static const u32 bpwr_clkid[] = {
+ (u32) BPWR_GP_TIMER5,
+ (u32) BPWR_GP_TIMER6,
+ (u32) BPWR_GP_TIMER7,
+ (u32) BPWR_GP_TIMER8,
+ (u32) BPWR_WD_TIMER3,
+ (u32) BPWR_MCBSP1,
+ (u32) BPWR_MCBSP2,
+ (u32) BPWR_MCBSP3,
+ (u32) BPWR_MCBSP4,
+ (u32) BPWR_MCBSP5,
+ (u32) BPWR_SSI
+};
+
+struct bpwr_clk_t {
+ u32 clk_id;
+ enum dsp_clk_id clk;
+};
+
+static const struct bpwr_clk_t bpwr_clks[] = {
+ {(u32) BPWR_GP_TIMER5, DSP_CLK_GPT5},
+ {(u32) BPWR_GP_TIMER6, DSP_CLK_GPT6},
+ {(u32) BPWR_GP_TIMER7, DSP_CLK_GPT7},
+ {(u32) BPWR_GP_TIMER8, DSP_CLK_GPT8},
+ {(u32) BPWR_WD_TIMER3, DSP_CLK_WDT3},
+ {(u32) BPWR_MCBSP1, DSP_CLK_MCBSP1},
+ {(u32) BPWR_MCBSP2, DSP_CLK_MCBSP2},
+ {(u32) BPWR_MCBSP3, DSP_CLK_MCBSP3},
+ {(u32) BPWR_MCBSP4, DSP_CLK_MCBSP4},
+ {(u32) BPWR_MCBSP5, DSP_CLK_MCBSP5},
+ {(u32) BPWR_SSI, DSP_CLK_SSI}
+};
+
+/* Interrupt Register Offsets */
+#define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */
+#define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */
+
+#define DSP_MAILBOX1_INT 10
+/*
+ * Bit definition of Interrupt Level Registers
+ */
+
+/* Mail Box defines */
+#define MB_ARM2DSP1_REG_OFFSET 0x00
+
+#define MB_ARM2DSP1B_REG_OFFSET 0x04
+
+#define MB_DSP2ARM1B_REG_OFFSET 0x0C
+
+#define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18
+
+#define MB_ARM2DSP_FLAG 0x0001
+
+#define MBOX_ARM2DSP HW_MBOX_ID0
+#define MBOX_DSP2ARM HW_MBOX_ID1
+#define MBOX_ARM HW_MBOX_U0_ARM
+#define MBOX_DSP HW_MBOX_U1_DSP1
+
+#define ENABLE true
+#define DISABLE false
+
+#define HIGH_LEVEL true
+#define LOW_LEVEL false
+
+/* Macros */
+#define CLEAR_BIT(reg, mask) (reg &= ~mask)
+#define SET_BIT(reg, mask) (reg |= mask)
+
+#define SET_GROUP_BITS16(reg, position, width, value) \
+	do {\
+		reg &= ~((0xFFFF >> (16 - (width))) << (position)); \
+		reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \
+	} while (0)
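+
+/*
+ * Usage sketch (illustrative): SET_GROUP_BITS16(reg, 4, 3, 0x5) clears
+ * bits 6:4 of "reg" and writes 0x5 there, e.g. 0xFFFF becomes 0xFFDF.
+ */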
+
+#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
+
+struct shm_segs {
+ u32 seg0_da;
+ u32 seg0_pa;
+ u32 seg0_va;
+ u32 seg0_size;
+ u32 seg1_da;
+ u32 seg1_pa;
+ u32 seg1_va;
+ u32 seg1_size;
+};
+
+
+/* This Bridge driver's device context: */
+struct bridge_dev_context {
+ struct dev_object *hdev_obj; /* Handle to Bridge device object. */
+ u32 dw_dsp_base_addr; /* Arm's API to DSP virt base addr */
+ /*
+ * DSP External memory prog address as seen virtually by the OS on
+ * the host side.
+ */
+ u32 dw_dsp_ext_base_addr; /* See the comment above */
+ u32 dw_api_reg_base; /* API mem map'd registers */
+ u32 dw_api_clk_base; /* CLK Registers */
+ u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
+ u32 dw_public_rhea; /* Pub Rhea */
+ u32 dw_int_addr; /* MB INTR reg */
+ u32 dw_tc_endianism; /* TC Endianism register */
+ u32 dw_test_base; /* DSP MMU Mapped registers */
+ u32 dw_self_loop; /* Pointer to the selfloop */
+ u32 dw_dsp_start_add; /* API Boot vector */
+ u32 dw_internal_size; /* Internal memory size */
+
+ struct omap_mbox *mbox; /* Mail box handle */
+ struct iommu *dsp_mmu; /* iommu for iva2 handler */
+ struct shm_segs sh_s;
+ struct cfg_hostres *resources; /* Host Resources */
+
+ /*
+ * Processor specific info is set when prog loaded and read from DCD.
+ * [See bridge_dev_ctrl()] PROC info contains DSP-MMU TLB entries.
+ */
+ /* DMMU TLB entries */
+ struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB];
+ u32 dw_brd_state; /* Last known board state. */
+
+ /* TC Settings */
+ bool tc_word_swap_on; /* Traffic Controller Word Swap */
+ u32 dsp_per_clks;
+};
+
+/*
+ * If dsp_debug is true, do not branch to the DSP entry
+ * point and wait for DSP to boot.
+ */
+extern s32 dsp_debug;
+
+/*
+ * ======== sm_interrupt_dsp ========
+ * Purpose:
+ * Set interrupt value & send an interrupt to the DSP processor(s).
+ * This is typically used when the mailbox interrupt mechanism allows data
+ * to be associated with the interrupt, such as for OMAP's CMD/DATA regs.
+ * Parameters:
+ * dev_context: Handle to Bridge driver defined device info.
+ * mb_val: Value associated with the interrupt (e.g. mailbox value).
+ * Returns:
+ * 0: Interrupt sent;
+ * else: Unable to send interrupt.
+ * Requires:
+ * Ensures:
+ */
+int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val);
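+
+/*
+ * Typical call (illustrative), as made by the channel code after queueing
+ * an IO request:
+ *
+ *	if (mb_val != 0)
+ *		sm_interrupt_dsp(dev_context, mb_val);
+ */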
+
+#endif /* _TIOMAP_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap_pwr.h b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
new file mode 100644
index 000000000000..bd0354d9ad03
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
@@ -0,0 +1,85 @@
+/*
+ * _tiomap_pwr.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions and types for the DSP wake/sleep routines.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_PWR_
+#define _TIOMAP_PWR_
+
+#ifdef CONFIG_PM
+extern s32 dsp_test_sleepstate;
+#endif
+
+extern struct mailbox_context mboxsetting;
+
+/*
+ * ======== wake_dsp =========
+ * Wakes up the DSP from DeepSleep
+ */
+extern int wake_dsp(struct bridge_dev_context *dev_context,
+ void *pargs);
+
+/*
+ * ======== sleep_dsp =========
+ * Places the DSP in DeepSleep.
+ */
+extern int sleep_dsp(struct bridge_dev_context *dev_context,
+ u32 dw_cmd, void *pargs);
+/*
+ * ======== interrupt_dsp ========
+ * Sends an interrupt to DSP unconditionally.
+ */
+extern void interrupt_dsp(struct bridge_dev_context *dev_context,
+ u16 mb_val);
+
+/*
+ * ======== dsp_peripheral_clk_ctrl =========
+ * Enables/disables DSP peripheral clocks as requested by the DSP.
+ */
+extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context
+ *dev_context, void *pargs);
+/*
+ * ======== handle_hibernation_from_dsp ========
+ * Handle Hibernation requested from DSP
+ */
+int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context);
+/*
+ * ======== post_scale_dsp ========
+ * Handle Post Scale notification to DSP
+ */
+int post_scale_dsp(struct bridge_dev_context *dev_context,
+ void *pargs);
+/*
+ * ======== pre_scale_dsp ========
+ * Handle Pre Scale notification to DSP
+ */
+int pre_scale_dsp(struct bridge_dev_context *dev_context,
+ void *pargs);
+/*
+ * ======== handle_constraints_set ========
+ * Handle constraints request from DSP
+ */
+int handle_constraints_set(struct bridge_dev_context *dev_context,
+ void *pargs);
+
+/*
+ * ======== dsp_clk_wakeup_event_ctrl ========
+ * This function sets the group selection bits for a clock while
+ * enabling/disabling its wakeup event.
+ */
+void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable);
+
+#endif /* _TIOMAP_PWR_ */
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
new file mode 100644
index 000000000000..662a5b5a58e3
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -0,0 +1,1013 @@
+/*
+ * chnl_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge functions for Bridge driver channel module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * The lower edge functions must be implemented by the Bridge driver
+ * writer, and are declared in chnl_sm.h.
+ *
+ * Care is taken in this code to prevent simultaneous access to channel
+ * queues from
+ * 1. Threads.
+ * 2. io_dpc(), scheduled from the io_isr() as an event.
+ *
+ * This is done primarily by:
+ * - Semaphores.
+ * - state flags in the channel object; and
+ * - ensuring the IO_Dispatch() routine, which is called from both
+ * CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
+ *
+ * Channel Invariant:
+ * There is an important invariant condition which must be maintained per
+ * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
+ * which may cause timeouts and/or failure of function sync_wait_on_event.
+ * This invariant condition is:
+ *
+ * LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
+ * and
+ * !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
+ */
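+
+/*
+ * In code form (illustrative), the invariant is re-established after each
+ * completion-queue operation roughly as:
+ *
+ *	if (LST_IS_EMPTY(pchnl->pio_completions))
+ *		sync_reset_event(pchnl->sync_event);
+ *	else
+ *		sync_set_event(pchnl->sync_event);
+ */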
+
+#include <linux/types.h>
+
+/* ----------------------------------- OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspchnl.h>
+#include "_tiomap.h"
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- Define for This */
+#define USERMODE_ADDR PAGE_OFFSET
+
+#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
+
+/* ----------------------------------- Function Prototypes */
+static struct lst_list *create_chirp_list(u32 chirps);
+
+static void free_chirp_list(struct lst_list *chirp_list);
+
+static struct chnl_irp *make_new_chirp(void);
+
+static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
+ u32 *chnl);
+
+/*
+ * ======== bridge_chnl_add_io_req ========
+ * Enqueue an I/O request for data transfer on a channel to the DSP.
+ * The direction (mode) is specified in the channel object. Note the DSP
+ * address is specified for channels opened in direct I/O mode.
+ */
+int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
+ u32 byte_size, u32 buf_size,
+ u32 dw_dsp_addr, u32 dw_arg)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ struct chnl_irp *chnl_packet_obj = NULL;
+ struct bridge_dev_context *dev_ctxt;
+ struct dev_object *dev_obj;
+ u8 dw_state;
+ bool is_eos;
+ struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
+ u8 *host_sys_buf = NULL;
+ bool sched_dpc = false;
+ u16 mb_val = 0;
+
+ is_eos = (byte_size == 0);
+
+ /* Validate args */
+ if (!host_buf || !pchnl) {
+ status = -EFAULT;
+ } else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
+ status = -EPERM;
+ } else {
+ /*
+ * Check the channel state: only queue chirp if channel state
+ * allows it.
+ */
+ dw_state = pchnl->dw_state;
+ if (dw_state != CHNL_STATEREADY) {
+ if (dw_state & CHNL_STATECANCEL)
+ status = -ECANCELED;
+ else if ((dw_state & CHNL_STATEEOS) &&
+ CHNL_IS_OUTPUT(pchnl->chnl_mode))
+ status = -EPIPE;
+ else
+ /* No other possible states left */
+ DBC_ASSERT(0);
+ }
+ }
+
+ dev_obj = dev_get_first();
+ dev_get_bridge_context(dev_obj, &dev_ctxt);
+ if (!dev_ctxt)
+ status = -EFAULT;
+
+ if (status)
+ goto func_end;
+
+ if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
+ if (!(host_buf < (void *)USERMODE_ADDR)) {
+ host_sys_buf = host_buf;
+ goto func_cont;
+ }
+ /* if addr in user mode, then copy to kernel space */
+ host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
+ if (host_sys_buf == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+ status = copy_from_user(host_sys_buf, host_buf,
+ buf_size);
+ if (status) {
+ kfree(host_sys_buf);
+ host_sys_buf = NULL;
+ status = -EFAULT;
+ goto func_end;
+ }
+ }
+ }
+func_cont:
+ /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
+ * channels. DPCCS is held to avoid race conditions with PCPY channels.
+ * If DPC is scheduled in process context (iosm_schedule) and any
+ * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
+ * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
+ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
+ if (pchnl->chnl_type == CHNL_PCPY) {
+ /* This is a processor-copy channel. */
+ if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+ /* Check buffer size on output channels for fit. */
+ if (byte_size >
+ io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
+ status = -EINVAL;
+
+ }
+ }
+ if (!status) {
+ /* Get a free chirp: */
+ chnl_packet_obj =
+ (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
+ if (chnl_packet_obj == NULL)
+ status = -EIO;
+
+ }
+ if (!status) {
+ /* Enqueue the chirp on the chnl's IORequest queue: */
+ chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
+ host_buf;
+ if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
+ chnl_packet_obj->host_sys_buf = host_sys_buf;
+
+ /*
+ * Note: for dma chans dw_dsp_addr contains dsp address
+ * of SM buffer.
+ */
+ DBC_ASSERT(chnl_mgr_obj->word_size != 0);
+ /* DSP address */
+ chnl_packet_obj->dsp_tx_addr =
+ dw_dsp_addr / chnl_mgr_obj->word_size;
+ chnl_packet_obj->byte_size = byte_size;
+ chnl_packet_obj->buf_size = buf_size;
+ /* Only valid for output channel */
+ chnl_packet_obj->dw_arg = dw_arg;
+ chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
+ CHNL_IOCSTATCOMPLETE);
+ lst_put_tail(pchnl->pio_requests,
+ (struct list_head *)chnl_packet_obj);
+ pchnl->cio_reqs++;
+ DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
+ /*
+ * If end of stream, update the channel state to prevent
+ * more IOR's.
+ */
+ if (is_eos)
+ pchnl->dw_state |= CHNL_STATEEOS;
+
+ /* Legacy DSM Processor-Copy */
+ DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
+ /* Request IO from the DSP */
+ io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
+ (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
+ IO_OUTPUT), &mb_val);
+ sched_dpc = true;
+
+ }
+ omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
+ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ if (mb_val != 0)
+ sm_interrupt_dsp(dev_ctxt, mb_val);
+
+ /* Schedule a DPC, to do the actual data transfer */
+ if (sched_dpc)
+ iosm_schedule(chnl_mgr_obj->hio_mgr);
+
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_cancel_io ========
+ * Return all I/O requests to the client which have not yet been
+ * transferred. The channel's I/O completion object is
+ * signalled, and all the I/O requests are queued as IOC's, with the
+ * status field set to CHNL_IOCSTATCANCEL.
+ * This call is typically used in abort situations, and is a prelude to
+ * chnl_close();
+ */
+int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ u32 chnl_id = -1;
+ s8 chnl_mode;
+ struct chnl_irp *chnl_packet_obj;
+ struct chnl_mgr *chnl_mgr_obj = NULL;
+
+ /* Check args: */
+ if (pchnl && pchnl->chnl_mgr_obj) {
+ chnl_id = pchnl->chnl_id;
+ chnl_mode = pchnl->chnl_mode;
+ chnl_mgr_obj = pchnl->chnl_mgr_obj;
+ } else {
+ status = -EFAULT;
+ }
+ if (status)
+ goto func_end;
+
+ /* Mark this channel as cancelled, to prevent further IORequests
+ * or dispatching. */
+ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ pchnl->dw_state |= CHNL_STATECANCEL;
+ if (LST_IS_EMPTY(pchnl->pio_requests))
+ goto func_cont;
+
+ if (pchnl->chnl_type == CHNL_PCPY) {
+ /* Indicate we have no more buffers available for transfer: */
+ if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
+ io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
+ } else {
+ /* Record that we no longer have output buffers
+ * available: */
+ chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+ }
+ }
+ /* Move all IOR's to IOC queue: */
+ while (!LST_IS_EMPTY(pchnl->pio_requests)) {
+ chnl_packet_obj =
+ (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
+ if (chnl_packet_obj) {
+ chnl_packet_obj->byte_size = 0;
+ chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
+ lst_put_tail(pchnl->pio_completions,
+ (struct list_head *)chnl_packet_obj);
+ pchnl->cio_cs++;
+ pchnl->cio_reqs--;
+ DBC_ASSERT(pchnl->cio_reqs >= 0);
+ }
+ }
+func_cont:
+ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_close ========
+ * Purpose:
+ * Ensures all pending I/O on this channel is cancelled, discards all
+ * queued I/O completion notifications, then frees the resources allocated
+ * for this channel, and makes the corresponding logical channel id
+ * available for subsequent use.
+ */
+int bridge_chnl_close(struct chnl_object *chnl_obj)
+{
+ int status;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+
+ /* Check args: */
+ if (!pchnl) {
+ status = -EFAULT;
+ goto func_cont;
+ }
+ {
+ /* Cancel IO: this ensures no further IO requests or
+ * notifications. */
+ status = bridge_chnl_cancel_io(chnl_obj);
+ }
+func_cont:
+ if (!status) {
+ /* Assert I/O on this channel is now cancelled: Protects
+ * from io_dpc. */
+ DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
+ /* Invalidate channel object: Protects from
+ * CHNL_GetIOCompletion(). */
+ /* Free the slot in the channel manager: */
+ pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
+ spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ pchnl->chnl_mgr_obj->open_channels -= 1;
+ spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ if (pchnl->ntfy_obj) {
+ ntfy_delete(pchnl->ntfy_obj);
+ kfree(pchnl->ntfy_obj);
+ pchnl->ntfy_obj = NULL;
+ }
+ /* Reset channel event: (NOTE: user_event freed in user
+ * context.). */
+ if (pchnl->sync_event) {
+ sync_reset_event(pchnl->sync_event);
+ kfree(pchnl->sync_event);
+ pchnl->sync_event = NULL;
+ }
+ /* Free I/O request and I/O completion queues: */
+ if (pchnl->pio_completions) {
+ free_chirp_list(pchnl->pio_completions);
+ pchnl->pio_completions = NULL;
+ pchnl->cio_cs = 0;
+ }
+ if (pchnl->pio_requests) {
+ free_chirp_list(pchnl->pio_requests);
+ pchnl->pio_requests = NULL;
+ pchnl->cio_reqs = 0;
+ }
+ if (pchnl->free_packets_list) {
+ free_chirp_list(pchnl->free_packets_list);
+ pchnl->free_packets_list = NULL;
+ }
+ /* Release channel object. */
+ kfree(pchnl);
+ pchnl = NULL;
+ }
+ DBC_ENSURE(status || !pchnl);
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_create ========
+ * Create a channel manager object, responsible for opening new channels
+ * and closing old ones for a given board.
+ */
+int bridge_chnl_create(struct chnl_mgr **channel_mgr,
+ struct dev_object *hdev_obj,
+ const struct chnl_mgrattrs *mgr_attrts)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = NULL;
+ u8 max_channels;
+
+ /* Check DBC requirements: */
+ DBC_REQUIRE(channel_mgr != NULL);
+ DBC_REQUIRE(mgr_attrts != NULL);
+ DBC_REQUIRE(mgr_attrts->max_channels > 0);
+ DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
+ DBC_REQUIRE(mgr_attrts->word_size != 0);
+
+ /* Allocate channel manager object */
+ chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
+ if (chnl_mgr_obj) {
+ /*
+ * The max_channels attr must equal the # of supported chnls for
+ * each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
+ * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
+ * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
+ */
+ DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
+ max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
+ /* Create array of channels */
+ chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
+ * max_channels, GFP_KERNEL);
+ if (chnl_mgr_obj->ap_channel) {
+ /* Initialize chnl_mgr object */
+ chnl_mgr_obj->dw_type = CHNL_TYPESM;
+ chnl_mgr_obj->word_size = mgr_attrts->word_size;
+ /* Total # chnls supported */
+ chnl_mgr_obj->max_channels = max_channels;
+ chnl_mgr_obj->open_channels = 0;
+ chnl_mgr_obj->dw_output_mask = 0;
+ chnl_mgr_obj->dw_last_output = 0;
+ chnl_mgr_obj->hdev_obj = hdev_obj;
+ spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
+ } else {
+ status = -ENOMEM;
+ }
+ } else {
+ status = -ENOMEM;
+ }
+
+ if (status) {
+ bridge_chnl_destroy(chnl_mgr_obj);
+ *channel_mgr = NULL;
+ } else {
+ /* Return channel manager object to caller... */
+ *channel_mgr = chnl_mgr_obj;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_destroy ========
+ * Purpose:
+ * Close all open channels, and destroy the channel manager.
+ */
+int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
+ u32 chnl_id;
+
+ if (hchnl_mgr) {
+ /* Close all open channels: */
+ for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
+ chnl_id++) {
+ status =
+ bridge_chnl_close(chnl_mgr_obj->ap_channel
+ [chnl_id]);
+ if (status)
+ dev_dbg(bridge, "%s: Error status 0x%x\n",
+ __func__, status);
+ }
+
+ /* Free channel manager object: */
+ kfree(chnl_mgr_obj->ap_channel);
+
+ /* Set hchnl_mgr to NULL in device object. */
+ dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
+ /* Free this Chnl Mgr object: */
+ kfree(hchnl_mgr);
+ } else {
+ status = -EFAULT;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_flush_io ========
+ * purpose:
+ * Flushes all the outstanding data requests on a channel.
+ */
+int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ s8 chnl_mode = -1;
+ struct chnl_mgr *chnl_mgr_obj;
+ struct chnl_ioc chnl_ioc_obj;
+ /* Check args: */
+ if (pchnl) {
+ if ((timeout == CHNL_IOCNOWAIT)
+ && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+ status = -EINVAL;
+ } else {
+ chnl_mode = pchnl->chnl_mode;
+ chnl_mgr_obj = pchnl->chnl_mgr_obj;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ if (!status) {
+ /* Note: Currently, if another thread continues to add IO
+ * requests to this channel, this function will continue to
+ * flush all such queued IO requests. */
+ if (CHNL_IS_OUTPUT(chnl_mode)
+ && (pchnl->chnl_type == CHNL_PCPY)) {
+ /* Wait for IO completions, up to the specified
+ * timeout: */
+ while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
+ status = bridge_chnl_get_ioc(chnl_obj,
+ timeout, &chnl_ioc_obj);
+ if (status)
+ continue;
+
+ if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
+ status = -ETIMEDOUT;
+
+ }
+ } else {
+ status = bridge_chnl_cancel_io(chnl_obj);
+ /* Now, leave the channel in the ready state: */
+ pchnl->dw_state &= ~CHNL_STATECANCEL;
+ }
+ }
+ DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests));
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_get_info ========
+ * Purpose:
+ * Retrieve information related to a channel.
+ */
+int bridge_chnl_get_info(struct chnl_object *chnl_obj,
+ struct chnl_info *channel_info)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ if (channel_info != NULL) {
+ if (pchnl) {
+ /* Return the requested information: */
+ channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
+ channel_info->event_obj = pchnl->user_event;
+ channel_info->cnhl_id = pchnl->chnl_id;
+ channel_info->dw_mode = pchnl->chnl_mode;
+ channel_info->bytes_tx = pchnl->bytes_moved;
+ channel_info->process = pchnl->process;
+ channel_info->sync_event = pchnl->sync_event;
+ channel_info->cio_cs = pchnl->cio_cs;
+ channel_info->cio_reqs = pchnl->cio_reqs;
+ channel_info->dw_state = pchnl->dw_state;
+ } else {
+ status = -EFAULT;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_get_ioc ========
+ * Optionally wait for I/O completion on a channel. Dequeue an I/O
+ * completion record, which contains information about the completed
+ * I/O request.
+ * Note: Ensures Channel Invariant (see notes above).
+ */
+int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
+ struct chnl_ioc *chan_ioc)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ struct chnl_irp *chnl_packet_obj;
+ int stat_sync;
+ bool dequeue_ioc = true;
+ struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
+ u8 *host_sys_buf = NULL;
+ struct bridge_dev_context *dev_ctxt;
+ struct dev_object *dev_obj;
+
+ /* Check args: */
+ if (!chan_ioc || !pchnl) {
+ status = -EFAULT;
+ } else if (timeout == CHNL_IOCNOWAIT) {
+ if (LST_IS_EMPTY(pchnl->pio_completions))
+ status = -EREMOTEIO;
+
+ }
+
+ dev_obj = dev_get_first();
+ dev_get_bridge_context(dev_obj, &dev_ctxt);
+ if (!dev_ctxt)
+ status = -EFAULT;
+
+ if (status)
+ goto func_end;
+
+ ioc.status = CHNL_IOCSTATCOMPLETE;
+ if (timeout !=
+ CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
+ if (timeout == CHNL_IOCINFINITE)
+ timeout = SYNC_INFINITE;
+
+ stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
+ if (stat_sync == -ETIME) {
+ /* No response from DSP */
+ ioc.status |= CHNL_IOCSTATTIMEOUT;
+ dequeue_ioc = false;
+ } else if (stat_sync == -EPERM) {
+ /* This can occur when the user mode thread is
+ * aborted (^C), or when _VWIN32_WaitSingleObject()
+ * fails due to unknown causes. */
+ /* Even though Wait failed, there may be something in
+ * the Q: */
+ if (LST_IS_EMPTY(pchnl->pio_completions)) {
+ ioc.status |= CHNL_IOCSTATCANCEL;
+ dequeue_ioc = false;
+ }
+ }
+ }
+ /* See comment in AddIOReq */
+ spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
+ if (dequeue_ioc) {
+ /* Dequeue IOC and set chan_ioc; */
+ DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
+ chnl_packet_obj =
+ (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
+ /* Update chan_ioc from channel state and chirp: */
+ if (chnl_packet_obj) {
+ pchnl->cio_cs--;
+ /* If this is a zero-copy channel, then set IOC's pbuf
+ * to the DSP's address. This DSP address will get
+ * translated to user's virtual addr later. */
+ {
+ host_sys_buf = chnl_packet_obj->host_sys_buf;
+ ioc.pbuf = chnl_packet_obj->host_user_buf;
+ }
+ ioc.byte_size = chnl_packet_obj->byte_size;
+ ioc.buf_size = chnl_packet_obj->buf_size;
+ ioc.dw_arg = chnl_packet_obj->dw_arg;
+ ioc.status |= chnl_packet_obj->status;
+ /* Place the used chirp on the free list: */
+ lst_put_tail(pchnl->free_packets_list,
+ (struct list_head *)chnl_packet_obj);
+ } else {
+ ioc.pbuf = NULL;
+ ioc.byte_size = 0;
+ }
+ } else {
+ ioc.pbuf = NULL;
+ ioc.byte_size = 0;
+ ioc.dw_arg = 0;
+ ioc.buf_size = 0;
+ }
+ /* Ensure invariant: If any IOC's are queued for this channel... */
+ if (!LST_IS_EMPTY(pchnl->pio_completions)) {
+ /* Since DSPStream_Reclaim() does not take a timeout
+ * parameter, we pass the stream's timeout value to
+ * bridge_chnl_get_ioc. We cannot determine whether or not
+ * we have waited in User mode. Since the stream's timeout
+ * value may be non-zero, we still have to set the event.
+ * Therefore, this optimization is taken out.
+ *
+ * if (timeout == CHNL_IOCNOWAIT) {
+ * ... ensure event is set..
+ * sync_set_event(pchnl->sync_event);
+ * } */
+ sync_set_event(pchnl->sync_event);
+ } else {
+ /* else, if list is empty, ensure event is reset. */
+ sync_reset_event(pchnl->sync_event);
+ }
+ omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
+ spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ if (dequeue_ioc
+ && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
+ if (!(ioc.pbuf < (void *)USERMODE_ADDR))
+ goto func_cont;
+
+ /* If the addr is in user mode, then copy it */
+ if (!host_sys_buf || !ioc.pbuf) {
+ status = -EFAULT;
+ goto func_cont;
+ }
+ if (!CHNL_IS_INPUT(pchnl->chnl_mode))
+ goto func_cont1;
+
+ /*host_user_buf */
+ status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
+ if (status) {
+ if (current->flags & PF_EXITING)
+ status = 0;
+ }
+ if (status)
+ status = -EFAULT;
+func_cont1:
+ kfree(host_sys_buf);
+ }
+func_cont:
+ /* Update User's IOC block: */
+ *chan_ioc = ioc;
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_get_mgr_info ========
+ * Retrieve information related to the channel manager.
+ */
+int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
+ struct chnl_mgrinfo *mgr_info)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
+
+ if (mgr_info != NULL) {
+ if (ch_id <= CHNL_MAXCHANNELS) {
+ if (hchnl_mgr) {
+ /* Return the requested information: */
+ mgr_info->chnl_obj =
+ chnl_mgr_obj->ap_channel[ch_id];
+ mgr_info->open_channels =
+ chnl_mgr_obj->open_channels;
+ mgr_info->dw_type = chnl_mgr_obj->dw_type;
+ /* total # of chnls */
+ mgr_info->max_channels =
+ chnl_mgr_obj->max_channels;
+ } else {
+ status = -EFAULT;
+ }
+ } else {
+ status = -ECHRNG;
+ }
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_idle ========
+ * Idles a particular channel.
+ */
+int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
+ bool flush_data)
+{
+ s8 chnl_mode;
+ struct chnl_mgr *chnl_mgr_obj;
+ int status = 0;
+
+ DBC_REQUIRE(chnl_obj);
+
+ chnl_mode = chnl_obj->chnl_mode;
+ chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
+
+ if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
+ /* Wait for IO completions, up to the specified timeout: */
+ status = bridge_chnl_flush_io(chnl_obj, timeout);
+ } else {
+ status = bridge_chnl_cancel_io(chnl_obj);
+
+ /* Reset the byte count and put channel back in ready state. */
+ chnl_obj->bytes_moved = 0;
+ chnl_obj->dw_state &= ~CHNL_STATECANCEL;
+ }
+
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_open ========
+ * Open a new half-duplex channel to the DSP board.
+ */
+int bridge_chnl_open(struct chnl_object **chnl,
+ struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
+ u32 ch_id, const struct chnl_attr *pattrs)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
+ struct chnl_object *pchnl = NULL;
+ struct sync_object *sync_event = NULL;
+ /* Ensure DBC requirements: */
+ DBC_REQUIRE(chnl != NULL);
+ DBC_REQUIRE(pattrs != NULL);
+ DBC_REQUIRE(hchnl_mgr != NULL);
+ *chnl = NULL;
+ /* Validate Args: */
+ if (pattrs->uio_reqs == 0) {
+ status = -EINVAL;
+ } else {
+ if (!hchnl_mgr) {
+ status = -EFAULT;
+ } else {
+ if (ch_id != CHNL_PICKFREE) {
+ if (ch_id >= chnl_mgr_obj->max_channels)
+ status = -ECHRNG;
+ else if (chnl_mgr_obj->ap_channel[ch_id] !=
+ NULL)
+ status = -EALREADY;
+ } else {
+ /* Check for free channel */
+ status =
+ search_free_channel(chnl_mgr_obj, &ch_id);
+ }
+ }
+ }
+ if (status)
+ goto func_end;
+
+ DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
+ /* Create channel object: */
+ pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
+ if (!pchnl) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ /* Protect queues from io_dpc: */
+ pchnl->dw_state = CHNL_STATECANCEL;
+ /* Allocate initial IOR and IOC queues: */
+ pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
+ pchnl->pio_requests = create_chirp_list(0);
+ pchnl->pio_completions = create_chirp_list(0);
+ pchnl->chnl_packets = pattrs->uio_reqs;
+ pchnl->cio_cs = 0;
+ pchnl->cio_reqs = 0;
+ sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (sync_event)
+ sync_init_event(sync_event);
+ else
+ status = -ENOMEM;
+
+ if (!status) {
+ pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+ GFP_KERNEL);
+ if (pchnl->ntfy_obj)
+ ntfy_init(pchnl->ntfy_obj);
+ else
+ status = -ENOMEM;
+ }
+
+ if (!status) {
+ if (pchnl->pio_completions && pchnl->pio_requests &&
+ pchnl->free_packets_list) {
+ /* Initialize CHNL object fields: */
+ pchnl->chnl_mgr_obj = chnl_mgr_obj;
+ pchnl->chnl_id = ch_id;
+ pchnl->chnl_mode = chnl_mode;
+ pchnl->user_event = sync_event;
+ pchnl->sync_event = sync_event;
+ /* Get the process handle */
+ pchnl->process = current->tgid;
+ pchnl->pcb_arg = 0;
+ pchnl->bytes_moved = 0;
+ /* Default to proc-copy */
+ pchnl->chnl_type = CHNL_PCPY;
+ } else {
+ status = -ENOMEM;
+ }
+ }
+
+ if (status) {
+ /* Free memory */
+ if (pchnl->pio_completions) {
+ free_chirp_list(pchnl->pio_completions);
+ pchnl->pio_completions = NULL;
+ pchnl->cio_cs = 0;
+ }
+ if (pchnl->pio_requests) {
+ free_chirp_list(pchnl->pio_requests);
+ pchnl->pio_requests = NULL;
+ }
+ if (pchnl->free_packets_list) {
+ free_chirp_list(pchnl->free_packets_list);
+ pchnl->free_packets_list = NULL;
+ }
+ kfree(sync_event);
+ sync_event = NULL;
+
+ if (pchnl->ntfy_obj) {
+ ntfy_delete(pchnl->ntfy_obj);
+ kfree(pchnl->ntfy_obj);
+ pchnl->ntfy_obj = NULL;
+ }
+ kfree(pchnl);
+ } else {
+ /* Insert channel object in channel manager: */
+ chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
+ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ chnl_mgr_obj->open_channels++;
+ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ /* Return result... */
+ pchnl->dw_state = CHNL_STATEREADY;
+ *chnl = pchnl;
+ }
+func_end:
+ DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_register_notify ========
+ * Registers for events on a particular channel.
+ */
+int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
+ u32 event_mask, u32 notify_type,
+ struct dsp_notification *hnotification)
+{
+ int status = 0;
+
+ DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
+
+ if (event_mask)
+ status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
+ event_mask, notify_type);
+ else
+ status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);
+
+ return status;
+}
+
+/*
+ * ======== create_chirp_list ========
+ * Purpose:
+ * Initialize a queue of channel I/O Request/Completion packets.
+ * Parameters:
+ * chirps: Number of Chirps to allocate.
+ * Returns:
+ * Pointer to queue of IRPs, or NULL.
+ * Requires:
+ * Ensures:
+ */
+static struct lst_list *create_chirp_list(u32 chirps)
+{
+ struct lst_list *chirp_list;
+ struct chnl_irp *chnl_packet_obj;
+ u32 i;
+
+ chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+
+ if (chirp_list) {
+ INIT_LIST_HEAD(&chirp_list->head);
+ /* Make N chirps and place on queue. */
+ for (i = 0; (i < chirps)
+ && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
+ lst_put_tail(chirp_list,
+ (struct list_head *)chnl_packet_obj);
+ }
+
+ /* If we couldn't allocate all chirps, free those allocated: */
+ if (i != chirps) {
+ free_chirp_list(chirp_list);
+ chirp_list = NULL;
+ }
+ }
+
+ return chirp_list;
+}
+
+/*
+ * ======== free_chirp_list ========
+ * Purpose:
+ * Free the queue of Chirps.
+ */
+static void free_chirp_list(struct lst_list *chirp_list)
+{
+ DBC_REQUIRE(chirp_list != NULL);
+
+ while (!LST_IS_EMPTY(chirp_list))
+ kfree(lst_get_head(chirp_list));
+
+ kfree(chirp_list);
+}
+
+/*
+ * ======== make_new_chirp ========
+ * Allocate the memory for a new channel IRP.
+ */
+static struct chnl_irp *make_new_chirp(void)
+{
+ struct chnl_irp *chnl_packet_obj;
+
+ chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
+ if (chnl_packet_obj != NULL) {
+ /* lst_init_elem only resets the list's member values. */
+ lst_init_elem(&chnl_packet_obj->link);
+ }
+
+ return chnl_packet_obj;
+}
+
+/*
+ * ======== search_free_channel ========
+ * Search for a free channel slot in the array of channel pointers.
+ */
+static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
+ u32 *chnl)
+{
+ int status = -ENOSR;
+ u32 i;
+
+ DBC_REQUIRE(chnl_mgr_obj);
+
+ for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
+ if (chnl_mgr_obj->ap_channel[i] == NULL) {
+ status = 0;
+ *chnl = i;
+ break;
+ }
+ }
+
+ return status;
+}
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
new file mode 100644
index 000000000000..46d17c777b88
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -0,0 +1,421 @@
+/*
+ * dsp-clock.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Clock and Timer services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <plat/dmtimer.h>
+#include <plat/mcbsp.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/dev.h>
+#include "_tiomap.h"
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/clk.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+
+#define OMAP_SSI_OFFSET 0x58000
+#define OMAP_SSI_SIZE 0x1000
+#define OMAP_SSI_SYSCONFIG_OFFSET 0x10
+
+#define SSI_AUTOIDLE (1 << 0)
+#define SSI_SIDLE_SMARTIDLE (2 << 3)
+#define SSI_MIDLE_NOIDLE (1 << 12)
+
+/* Clk types requested by the dsp */
+#define IVA2_CLK 0
+#define GPT_CLK 1
+#define WDT_CLK 2
+#define MCBSP_CLK 3
+#define SSI_CLK 4
+
+/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
+#define DMT_ID(id) ((id) + 4)
+
+/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
+#define MCBSP_ID(id) ((id) - 6)
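+
+/*
+ * Worked example (illustrative, assuming DSP_CLK_GPT5 == 1 and
+ * DSP_CLK_MCBSP1 == 6 per the id ranges above): DMT_ID(1) == 5, so
+ * bridge GPT id 1 maps to OMAP DM timer 5; MCBSP_ID(6) == 0, so bridge
+ * McBSP id 6 maps to OMAP McBSP port 0.
+ */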
+
+static struct omap_dm_timer *timer[4];
+
+struct clk *iva2_clk;
+
+struct dsp_ssi {
+ struct clk *sst_fck;
+ struct clk *ssr_fck;
+ struct clk *ick;
+};
+
+static struct dsp_ssi ssi;
+
+static u32 dsp_clocks;
+
+static inline u32 is_dsp_clk_active(u32 clk, u8 id)
+{
+ return clk & (1 << id);
+}
+
+static inline void set_dsp_clk_active(u32 *clk, u8 id)
+{
+ *clk |= (1 << id);
+}
+
+static inline void set_dsp_clk_inactive(u32 *clk, u8 id)
+{
+ *clk &= ~(1 << id);
+}
+
+static s8 get_clk_type(u8 id)
+{
+ s8 type;
+
+ if (id == DSP_CLK_IVA2)
+ type = IVA2_CLK;
+ else if (id <= DSP_CLK_GPT8)
+ type = GPT_CLK;
+ else if (id == DSP_CLK_WDT3)
+ type = WDT_CLK;
+ else if (id <= DSP_CLK_MCBSP5)
+ type = MCBSP_CLK;
+ else if (id == DSP_CLK_SSI)
+ type = SSI_CLK;
+ else
+ type = -1;
+
+ return type;
+}
+
+/*
+ * ======== dsp_clk_exit ========
+ * Purpose:
+ * Cleanup CLK module.
+ */
+void dsp_clk_exit(void)
+{
+ dsp_clock_disable_all(dsp_clocks);
+
+ clk_put(iva2_clk);
+ clk_put(ssi.sst_fck);
+ clk_put(ssi.ssr_fck);
+ clk_put(ssi.ick);
+}
+
+/*
+ * ======== dsp_clk_init ========
+ * Purpose:
+ * Initialize CLK module.
+ */
+void dsp_clk_init(void)
+{
+ static struct platform_device dspbridge_device;
+
+ dspbridge_device.dev.bus = &platform_bus_type;
+
+ iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
+ if (IS_ERR(iva2_clk))
+ dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
+
+ ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
+ ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
+ ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
+
+ if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
+ dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
+ ssi.sst_fck, ssi.ssr_fck, ssi.ick);
+}
+
+#ifdef CONFIG_OMAP_MCBSP
+static void mcbsp_clk_prepare(bool flag, u8 id)
+{
+ struct cfg_hostres *resources;
+ struct dev_object *hdev_object = NULL;
+ struct bridge_dev_context *bridge_context = NULL;
+ u32 val;
+
+ hdev_object = (struct dev_object *)drv_get_first_dev_object();
+ if (!hdev_object)
+ return;
+
+ dev_get_bridge_context(hdev_object, &bridge_context);
+ if (!bridge_context)
+ return;
+
+ resources = bridge_context->resources;
+ if (!resources)
+ return;
+
+ if (flag) {
+ if (id == DSP_CLK_MCBSP1) {
+ /* set MCBSP1_CLKS, on McBSP1 ON */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val |= 1 << 2;
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ } else if (id == DSP_CLK_MCBSP2) {
+ /* set MCBSP2_CLKS, on McBSP2 ON */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val |= 1 << 6;
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ }
+ } else {
+ if (id == DSP_CLK_MCBSP1) {
+ /* clear MCBSP1_CLKS, on McBSP1 OFF */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val &= ~(1 << 2);
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ } else if (id == DSP_CLK_MCBSP2) {
+ /* clear MCBSP2_CLKS, on McBSP2 OFF */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val &= ~(1 << 6);
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ }
+ }
+}
+#endif
+
+/**
+ * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout
+ * @clk_id: GP Timer clock id.
+ * @load: Overflow value.
+ *
+ * Sets an overflow interrupt on the desired GPT and waits up to
+ * 5 msecs for the interrupt to occur.
+ */
+void dsp_gpt_wait_overflow(short int clk_id, unsigned int load)
+{
+ struct omap_dm_timer *gpt = timer[clk_id - 1];
+ unsigned long timeout;
+
+ if (!gpt)
+ return;
+
+ /* Enable overflow interrupt */
+ omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
+
+ /*
+ * Set counter value to overflow counter after
+ * one tick and start timer.
+ */
+ omap_dm_timer_set_load_start(gpt, 0, load);
+
+ /* Wait 80us for timer to overflow */
+ udelay(80);
+
+ timeout = jiffies + msecs_to_jiffies(5);
+ /* Check interrupt status and wait for interrupt */
+ while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: GPTimer interrupt failed\n", __func__);
+ break;
+ }
+ }
+}
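+
+/*
+ * Editor's illustrative sketch, not part of this patch: the loop above
+ * follows the usual jiffies-deadline polling pattern. In generic form
+ * (the done() predicate is hypothetical):
+ */
+static bool __maybe_unused poll_until(bool (*done)(void), unsigned int ms)
+{
+ unsigned long deadline = jiffies + msecs_to_jiffies(ms);
+
+ while (!done()) {
+ if (time_after(jiffies, deadline))
+ return false; /* timed out */
+ cpu_relax();
+ }
+ return true;
+}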
+
+/*
+ * ======== dsp_clk_enable ========
+ * Purpose:
+ * Enable the specified clock.
+ *
+ */
+int dsp_clk_enable(enum dsp_clk_id clk_id)
+{
+ int status = 0;
+
+ if (is_dsp_clk_active(dsp_clocks, clk_id)) {
+ dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id);
+ goto out;
+ }
+
+ switch (get_clk_type(clk_id)) {
+ case IVA2_CLK:
+ clk_enable(iva2_clk);
+ break;
+ case GPT_CLK:
+ timer[clk_id - 1] =
+ omap_dm_timer_request_specific(DMT_ID(clk_id));
+ break;
+#ifdef CONFIG_OMAP_MCBSP
+ case MCBSP_CLK:
+ mcbsp_clk_prepare(true, clk_id);
+ omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
+ omap_mcbsp_request(MCBSP_ID(clk_id));
+ break;
+#endif
+ case WDT_CLK:
+ dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n");
+ break;
+ case SSI_CLK:
+ clk_enable(ssi.sst_fck);
+ clk_enable(ssi.ssr_fck);
+ clk_enable(ssi.ick);
+
+ /*
+ * The SSI module must be configured so that its master
+ * interface is not forced idle. If it is forced idle, the
+ * SSI module transitions to standby, causing the client on
+ * the DSP to hang while waiting for the SSI module to become
+ * active after the clocks are enabled.
+ */
+ ssi_clk_prepare(true);
+ break;
+ default:
+ dev_err(bridge, "Invalid clock id for enable\n");
+ status = -EPERM;
+ }
+
+ if (!status)
+ set_dsp_clk_active(&dsp_clocks, clk_id);
+
+out:
+ return status;
+}
+
+/**
+ * dsp_clock_enable_all - Enable clocks used by the DSP
+ * @dsp_per_clocks: Mask of the DSP peripheral clocks to be enabled.
+ *
+ * This function enables all the peripheral clocks that were requested by DSP.
+ */
+u32 dsp_clock_enable_all(u32 dsp_per_clocks)
+{
+ u32 clk_id;
+ u32 status = -EPERM;
+
+ for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
+ if (is_dsp_clk_active(dsp_per_clocks, clk_id))
+ status = dsp_clk_enable(clk_id);
+ }
+
+ return status;
+}
+
+/*
+ * ======== dsp_clk_disable ========
+ * Purpose:
+ * Disable the clock.
+ *
+ */
+int dsp_clk_disable(enum dsp_clk_id clk_id)
+{
+ int status = 0;
+
+ if (!is_dsp_clk_active(dsp_clocks, clk_id)) {
+ dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id);
+ goto out;
+ }
+
+ switch (get_clk_type(clk_id)) {
+ case IVA2_CLK:
+ clk_disable(iva2_clk);
+ break;
+ case GPT_CLK:
+ omap_dm_timer_free(timer[clk_id - 1]);
+ break;
+#ifdef CONFIG_OMAP_MCBSP
+ case MCBSP_CLK:
+ mcbsp_clk_prepare(false, clk_id);
+ omap_mcbsp_free(MCBSP_ID(clk_id));
+ break;
+#endif
+ case WDT_CLK:
+ dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n");
+ break;
+ case SSI_CLK:
+ ssi_clk_prepare(false);
+ clk_disable(ssi.sst_fck);
+ clk_disable(ssi.ssr_fck);
+ clk_disable(ssi.ick);
+ break;
+ default:
+ dev_err(bridge, "Invalid clock id for disable\n");
+ status = -EPERM;
+ }
+
+ if (!status)
+ set_dsp_clk_inactive(&dsp_clocks, clk_id);
+
+out:
+ return status;
+}
+
+/**
+ * dsp_clock_disable_all - Disable all active clocks
+ * @dsp_per_clocks: Mask of the DSP peripheral clocks to be disabled.
+ *
+ * This function disables all the peripheral clocks that were enabled by DSP.
+ * It is meant to be called only when DSP is entering hibernation or when DSP
+ * is in error state.
+ */
+u32 dsp_clock_disable_all(u32 dsp_per_clocks)
+{
+ u32 clk_id;
+ u32 status = -EPERM;
+
+ for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
+ if (is_dsp_clk_active(dsp_per_clocks, clk_id))
+ status = dsp_clk_disable(clk_id);
+ }
+
+ return status;
+}
+
+u32 dsp_clk_get_iva2_rate(void)
+{
+ u32 clk_speed_khz;
+
+ clk_speed_khz = clk_get_rate(iva2_clk);
+ clk_speed_khz /= 1000;
+ dev_dbg(bridge, "%s: clk speed Khz = %d\n", __func__, clk_speed_khz);
+
+ return clk_speed_khz;
+}
+
+void ssi_clk_prepare(bool flag)
+{
+ void __iomem *ssi_base;
+ unsigned int value;
+
+ ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE);
+ if (!ssi_base) {
+ pr_err("%s: error, SSI not configured\n", __func__);
+ return;
+ }
+
+ if (flag) {
+ /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to
+ * no idle
+ */
+ value = SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE;
+ } else {
+ /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to
+ * forced idle
+ */
+ value = SSI_AUTOIDLE;
+ }
+
+ __raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET);
+ iounmap(ssi_base);
+}
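+
+/*
+ * Editor's illustrative usage sketch, not part of this patch: callers are
+ * expected to pair each dsp_clk_enable() with a dsp_clk_disable() on the
+ * same id; the dsp_clocks bitmask flags double enables/disables. This
+ * mirrors the GPT8 sequence used by the MMU fault handler in dsp-mmu.c:
+ */
+static int __maybe_unused example_pulse_gpt8(void)
+{
+ int status;
+
+ status = dsp_clk_enable(DSP_CLK_GPT8);
+ if (status)
+ return status;
+
+ dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
+
+ return dsp_clk_disable(DSP_CLK_GPT8);
+}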
+
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
new file mode 100644
index 000000000000..983c95adc8ff
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/dsp-mmu.c
@@ -0,0 +1,317 @@
+/*
+ * dsp-mmu.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP iommu.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <dspbridge/host_os.h>
+#include <plat/dmtimer.h>
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/io_sm.h>
+#include <dspbridge/dspdeh.h>
+#include "_tiomap.h"
+
+#include <dspbridge/dsp-mmu.h>
+
+#define MMU_CNTL_TWL_EN (1 << 2)
+
+static struct tasklet_struct mmu_tasklet;
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
+{
+ void *dummy_addr;
+ u32 fa, tmp;
+ struct iotlb_entry e;
+ struct iommu *mmu = dev_context->dsp_mmu;
+ dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
+
+ /*
+ * Before acking the MMU fault, let's make sure MMU can only
+ * access entry #0. Then add a new entry so that the DSP OS
+ * can continue in order to dump the stack.
+ */
+ tmp = iommu_read_reg(mmu, MMU_CNTL);
+ tmp &= ~MMU_CNTL_TWL_EN;
+ iommu_write_reg(mmu, tmp, MMU_CNTL);
+ fa = iommu_read_reg(mmu, MMU_FAULT_AD);
+ e.da = fa & PAGE_MASK;
+ e.pa = virt_to_phys(dummy_addr);
+ e.valid = 1;
+ e.prsvd = 1;
+ e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
+ e.endian = MMU_RAM_ENDIAN_LITTLE;
+ e.elsz = MMU_RAM_ELSZ_32;
+ e.mixed = 0;
+
+ load_iotlb_entry(mmu, &e);
+
+ dsp_clk_enable(DSP_CLK_GPT8);
+
+ dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
+
+ /* Clear MMU interrupt */
+ tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
+ iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
+
+ dump_dsp_stack(dev_context);
+ dsp_clk_disable(DSP_CLK_GPT8);
+
+ iopgtable_clear_entry(mmu, fa);
+ free_page((unsigned long)dummy_addr);
+}
+#endif
+
+
+static void fault_tasklet(unsigned long data)
+{
+ struct iommu *mmu = (struct iommu *)data;
+ struct bridge_dev_context *dev_ctx;
+ struct deh_mgr *dm;
+ u32 fa;
+ dev_get_deh_mgr(dev_get_first(), &dm);
+ dev_get_bridge_context(dev_get_first(), &dev_ctx);
+
+ if (!dm || !dev_ctx)
+ return;
+
+ fa = iommu_read_reg(mmu, MMU_FAULT_AD);
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+ print_dsp_trace_buffer(dev_ctx);
+ dump_dl_modules(dev_ctx);
+ mmu_fault_print_stack(dev_ctx);
+#endif
+
+ bridge_deh_notify(dm, DSP_MMUFAULT, fa);
+}
+
+/*
+ * ======== mmu_fault_isr ========
+ * ISR to be triggered by a DSP MMU fault interrupt.
+ */
+static int mmu_fault_callback(struct iommu *mmu)
+{
+ if (!mmu)
+ return -EPERM;
+
+ iommu_write_reg(mmu, 0, MMU_IRQENABLE);
+ tasklet_schedule(&mmu_tasklet);
+ return 0;
+}
+
+/**
+ * dsp_mmu_init() - initialize the dsp_mmu module and return a handle
+ *
+ * This function initializes the dsp mmu module and returns a struct iommu
+ * handle to be used for dsp maps.
+ *
+ */
+struct iommu *dsp_mmu_init(void)
+{
+ struct iommu *mmu;
+
+ mmu = iommu_get("iva2");
+
+ if (!IS_ERR(mmu)) {
+ tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
+ mmu->isr = mmu_fault_callback;
+ }
+
+ return mmu;
+}
+
+/**
+ * dsp_mmu_exit() - destroy dsp mmu module
+ * @mmu: Pointer to iommu handle.
+ *
+ * This function destroys the dsp mmu module.
+ *
+ */
+void dsp_mmu_exit(struct iommu *mmu)
+{
+ if (mmu)
+ iommu_put(mmu);
+ tasklet_kill(&mmu_tasklet);
+}
+
+/**
+ * user_va2_pa() - get physical address from userspace address.
+ * @mm: mm_struct pointer of the process.
+ * @address: Virtual user space address.
+ *
+ */
+static u32 user_va2_pa(struct mm_struct *mm, u32 address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, address);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * get_io_pages() - pin and get pages of the user's io buffer.
+ * @mm: mm_struct pointer of the process.
+ * @uva: Virtual user space address.
+ * @pages: Number of pages to be pinned.
+ * @usr_pgs: struct page array pointer where the user pages will be stored.
+ *
+ */
+static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
+ struct page **usr_pgs)
+{
+ u32 pa;
+ int i;
+ struct page *pg;
+
+ for (i = 0; i < pages; i++) {
+ pa = user_va2_pa(mm, uva);
+
+ if (!pfn_valid(__phys_to_pfn(pa)))
+ break;
+
+ pg = phys_to_page(pa);
+ usr_pgs[i] = pg;
+ get_page(pg);
+ }
+ return i;
+}
+
+/**
+ * user_to_dsp_map() - maps user to dsp virtual address
+ * @mmu: Pointer to iommu handle.
+ * @uva: Virtual user space address.
+ * @da: DSP address.
+ * @size: Buffer size to map.
+ * @usr_pgs: struct page array pointer where the user pages will be stored.
+ *
+ * This function maps a user space buffer into a DSP virtual address.
+ *
+ */
+u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
+ struct page **usr_pgs)
+{
+ int res, w = 0;
+ unsigned pages;
+ int i;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+
+ if (!size || !usr_pgs)
+ return -EINVAL;
+
+ pages = size / PG_SIZE4K;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, uva);
+ while (vma && (uva + size > vma->vm_end))
+ vma = find_vma(mm, vma->vm_end + 1);
+
+ if (!vma) {
+ pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
+ __func__, uva, size);
+ up_read(&mm->mmap_sem);
+ return -EINVAL;
+ }
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ w = 1;
+
+ if (vma->vm_flags & VM_IO)
+ i = get_io_pages(mm, uva, pages, usr_pgs);
+ else
+ i = get_user_pages(current, mm, uva, pages, w, 1,
+ usr_pgs, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (i < 0)
+ return i;
+
+ if (i < pages) {
+ res = -EFAULT;
+ goto err_pages;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ res = -ENOMEM;
+ goto err_pages;
+ }
+
+ res = sg_alloc_table(sgt, pages, GFP_KERNEL);
+
+ if (res < 0)
+ goto err_sg;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
+
+ da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+
+ if (!IS_ERR_VALUE(da))
+ return da;
+ res = (int)da;
+
+ sg_free_table(sgt);
+err_sg:
+ kfree(sgt);
+ i = pages;
+err_pages:
+ while (i--)
+ put_page(usr_pgs[i]);
+ return res;
+}
+
+/**
+ * user_to_dsp_unmap() - unmaps DSP virtual buffer.
+ * @mmu: Pointer to iommu handle.
+ * @da: DSP address.
+ *
+ * This function unmaps a previously mapped user space buffer from its
+ * DSP virtual address.
+ *
+ */
+int user_to_dsp_unmap(struct iommu *mmu, u32 da)
+{
+ unsigned i;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+
+ sgt = iommu_vunmap(mmu, da);
+ if (!sgt)
+ return -EFAULT;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ put_page(sg_page(sg));
+ sg_free_table(sgt);
+ kfree(sgt);
+
+ return 0;
+}
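+
+/*
+ * Editor's illustrative usage sketch, not part of this patch: a successful
+ * user_to_dsp_map() must eventually be balanced by user_to_dsp_unmap() on
+ * the returned DSP address, and usr_pgs must have room for size / PG_SIZE4K
+ * page pointers.
+ */
+static int __maybe_unused example_map_then_unmap(struct iommu *mmu, u32 uva,
+ u32 da, u32 size, struct page **usr_pgs)
+{
+ u32 mapped = user_to_dsp_map(mmu, uva, da, size, usr_pgs);
+
+ if (IS_ERR_VALUE(mapped))
+ return (int)mapped;
+
+ /* ... hand the buffer at 'mapped' to the DSP and wait ... */
+
+ return user_to_dsp_unmap(mmu, mapped);
+}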
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
new file mode 100644
index 000000000000..194badaba0ed
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -0,0 +1,2180 @@
+/*
+ * io_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * IO dispatcher for a shared memory channel driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Channel Invariant:
+ * There is an important invariant condition which must be maintained per
+ * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
+ * which may cause timeouts and/or failure of the sync_wait_on_event
+ * function.
+ */
+#include <linux/types.h>
+
+/* Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/workqueue.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* Services Layer */
+#include <dspbridge/ntfy.h>
+#include <dspbridge/sync.h>
+
+/* Bridge Driver */
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dspio.h>
+#include <dspbridge/dspioctl.h>
+#include <dspbridge/wdt.h>
+#include <_tiomap.h>
+#include <tiomap_io.h>
+#include <_tiomap_pwr.h>
+
+/* Platform Manager */
+#include <dspbridge/cod.h>
+#include <dspbridge/node.h>
+#include <dspbridge/dev.h>
+
+/* Others */
+#include <dspbridge/rms_sh.h>
+#include <dspbridge/mgr.h>
+#include <dspbridge/drv.h>
+#include "_cmm.h"
+#include "module_list.h"
+
+/* This */
+#include <dspbridge/io_sm.h>
+#include "_msg_sm.h"
+
+/* Defines, Data Structures, Typedefs */
+#define OUTPUTNOTREADY 0xffff
+#define NOTENABLED 0xffff /* Channel(s) not enabled */
+
+#define EXTEND "_EXT_END"
+
+#define SWAP_WORD(x) (x)
+#define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */
+
+#define MAX_PM_REQS 32
+
+#define MMU_FAULT_HEAD1 0xa5a5a5a5
+#define MMU_FAULT_HEAD2 0x96969696
+#define POLL_MAX 1000
+#define MAX_MMU_DBGBUFF 10240
+
+/* IO Manager: only one created per board */
+struct io_mgr {
+ /* These three fields must be the first fields in an io_mgr struct */
+ /* Bridge device context */
+ struct bridge_dev_context *hbridge_context;
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+ struct dev_object *hdev_obj; /* Device this board represents */
+
+ /* These fields initialized in bridge_io_create() */
+ struct chnl_mgr *hchnl_mgr;
+ struct shm *shared_mem; /* Shared Memory control */
+ u8 *input; /* Address of input channel */
+ u8 *output; /* Address of output channel */
+ struct msg_mgr *hmsg_mgr; /* Message manager */
+ /* Msg control for messages from the DSP */
+ struct msg_ctrl *msg_input_ctrl;
+ /* Msg control for messages to the DSP */
+ struct msg_ctrl *msg_output_ctrl;
+ u8 *msg_input; /* Address of input messages */
+ u8 *msg_output; /* Address of output messages */
+ u32 usm_buf_size; /* Size of a shared memory I/O channel */
+ bool shared_irq; /* Is this IRQ shared? */
+ u32 word_size; /* Size in bytes of DSP word */
+ u16 intr_val; /* Interrupt value */
+ /* Private extended proc info; mmu setup */
+ struct mgr_processorextinfo ext_proc_info;
+ struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */
+ struct work_struct io_workq; /* workqueue */
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ u32 ul_trace_buffer_begin; /* Trace message start address */
+ u32 ul_trace_buffer_end; /* Trace message end address */
+ u32 ul_trace_buffer_current; /* Trace message current address */
+ u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */
+ u8 *pmsg;
+ u32 ul_gpp_va;
+ u32 ul_dsp_va;
+#endif
+ /* IO Dpc */
+ u32 dpc_req; /* Number of requested DPCs. */
+ u32 dpc_sched; /* Number of executed DPCs. */
+ struct tasklet_struct dpc_tasklet;
+ spinlock_t dpc_lock;
+
+};
+
+/* Function Prototypes */
+static void io_dispatch_pm(struct io_mgr *pio_mgr);
+static void notify_chnl_complete(struct chnl_object *pchnl,
+ struct chnl_irp *chnl_packet_obj);
+static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode);
+static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode);
+static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
+static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
+static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
+ struct chnl_object *pchnl, u32 mask);
+
+/* Bus Addr (cached kernel) */
+static int register_shm_segs(struct io_mgr *hio_mgr,
+ struct cod_manager *cod_man,
+ u32 dw_gpp_base_pa);
+
+static inline void set_chnl_free(struct shm *sm, u32 chnl)
+{
+ sm->host_free_mask &= ~(1 << chnl);
+}
+
+static inline void set_chnl_busy(struct shm *sm, u32 chnl)
+{
+ sm->host_free_mask |= 1 << chnl;
+}
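+
+/*
+ * Editor's illustrative sketch, not part of this patch: despite the
+ * "free"/"busy" naming, a set bit in host_free_mask tells the DSP that the
+ * host has a buffer queued on that channel (see io_request_chnl() and
+ * io_cancel_chnl() below). A hypothetical query helper:
+ */
+static inline bool host_has_buffer(struct shm *sm, u32 chnl)
+{
+ return sm->host_free_mask & (1 << chnl);
+}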
+
+
+/*
+ * ======== bridge_io_create ========
+ * Create an IO manager object.
+ */
+int bridge_io_create(struct io_mgr **io_man,
+ struct dev_object *hdev_obj,
+ const struct io_attrs *mgr_attrts)
+{
+ int status = 0;
+ struct io_mgr *pio_mgr = NULL;
+ struct shm *shared_mem = NULL;
+ struct bridge_dev_context *hbridge_context = NULL;
+ struct cfg_devnode *dev_node_obj;
+ struct chnl_mgr *hchnl_mgr;
+ u8 dev_type;
+
+ /* Check requirements */
+ if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
+ if (!hchnl_mgr || hchnl_mgr->hio_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /*
+ * Message manager will be created when a file is loaded, since
+ * size of message buffer in shared memory is configurable in
+ * the base image.
+ */
+ dev_get_bridge_context(hdev_obj, &hbridge_context);
+ if (!hbridge_context) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ dev_get_dev_type(hdev_obj, &dev_type);
+ /*
+ * The DSP shared memory area will get set up properly when
+ * a program is loaded; its addresses are unknown until a COFF
+ * file is loaded. The value -1 is used because it is less
+ * likely than 0 to be a valid address.
+ */
+ shared_mem = (struct shm *)-1;
+
+ /* Allocate IO manager object */
+ pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
+ if (pio_mgr == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ /* Initialize chnl_mgr object */
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ pio_mgr->pmsg = NULL;
+#endif
+ pio_mgr->hchnl_mgr = hchnl_mgr;
+ pio_mgr->word_size = mgr_attrts->word_size;
+ pio_mgr->shared_mem = shared_mem;
+
+ if (dev_type == DSP_UNIT) {
+ /* Create an IO DPC */
+ tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
+
+ /* Initialize DPC counters */
+ pio_mgr->dpc_req = 0;
+ pio_mgr->dpc_sched = 0;
+
+ spin_lock_init(&pio_mgr->dpc_lock);
+
+ status = dev_get_dev_node(hdev_obj, &dev_node_obj);
+ }
+
+ if (!status) {
+ pio_mgr->hbridge_context = hbridge_context;
+ pio_mgr->shared_irq = mgr_attrts->irq_shared;
+ if (dsp_wdt_init())
+ status = -EPERM;
+ } else {
+ status = -EIO;
+ }
+func_end:
+ if (status) {
+ /* Cleanup */
+ bridge_io_destroy(pio_mgr);
+ if (io_man)
+ *io_man = NULL;
+ } else {
+ /* Return IO manager object to caller... */
+ hchnl_mgr->hio_mgr = pio_mgr;
+ *io_man = pio_mgr;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_io_destroy ========
+ * Purpose:
+ * Disable interrupts, destroy the IO manager.
+ */
+int bridge_io_destroy(struct io_mgr *hio_mgr)
+{
+ int status = 0;
+ if (hio_mgr) {
+ /* Free IO DPC object */
+ tasklet_kill(&hio_mgr->dpc_tasklet);
+
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ kfree(hio_mgr->pmsg);
+#endif
+ dsp_wdt_exit();
+ /* Free this IO manager object */
+ kfree(hio_mgr);
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== bridge_io_on_loaded ========
+ * Purpose:
+ * Called when a new program is loaded to get shared memory buffer
+ * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
+ * are in DSP address units.
+ */
+int bridge_io_on_loaded(struct io_mgr *hio_mgr)
+{
+ struct cod_manager *cod_man;
+ struct chnl_mgr *hchnl_mgr;
+ struct msg_mgr *hmsg_mgr;
+ struct shm_segs *sm_sg;
+ u32 ul_shm_base;
+ u32 ul_shm_base_offset;
+ u32 ul_shm_limit;
+ u32 ul_shm_length = -1;
+ u32 ul_mem_length = -1;
+ u32 ul_msg_base;
+ u32 ul_msg_limit;
+ u32 ul_msg_length = -1;
+ u32 ul_ext_end;
+ u32 ul_gpp_pa = 0;
+ u32 ul_gpp_va = 0;
+ u32 ul_dsp_va = 0;
+ u32 ul_seg_size = 0;
+ u32 ul_pad_size = 0;
+ u32 i;
+ int status = 0;
+ u8 num_procs = 0;
+ s32 ndx = 0;
+ /* DSP MMU setup table */
+ struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
+ struct cfg_hostres *host_res;
+ struct bridge_dev_context *pbridge_context;
+ u32 shm0_end;
+ u32 ul_dyn_ext_base;
+ u32 ul_seg1_size = 0;
+
+ status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
+ if (!pbridge_context) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ host_res = pbridge_context->resources;
+ if (!host_res) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ sm_sg = &pbridge_context->sh_s;
+
+ status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
+ if (!cod_man) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hchnl_mgr = hio_mgr->hchnl_mgr;
+ /* The message manager is destroyed when the board is stopped. */
+ dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
+ hmsg_mgr = hio_mgr->hmsg_mgr;
+ if (!hchnl_mgr || !hmsg_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hio_mgr->shared_mem = NULL;
+
+ /* Get start and length of channel part of shared memory */
+ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
+ &ul_shm_base);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
+ &ul_shm_limit);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ if (ul_shm_limit <= ul_shm_base) {
+ status = -EINVAL;
+ goto func_end;
+ }
+ /* Get total length in bytes */
+ ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
+ /* Calculate size of a PROCCOPY shared memory region */
+ dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
+ __func__, (ul_shm_length - sizeof(struct shm)));
+
+ /* Get start and length of message part of shared memory */
+ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
+ &ul_msg_base);
+ if (!status) {
+ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
+ &ul_msg_limit);
+ if (!status) {
+ if (ul_msg_limit <= ul_msg_base) {
+ status = -EINVAL;
+ } else {
+ /*
+ * Length (bytes) of messaging part of shared
+ * memory.
+ */
+ ul_msg_length =
+ (ul_msg_limit - ul_msg_base +
+ 1) * hio_mgr->word_size;
+ /*
+ * Total length (bytes) of shared memory:
+ * chnl + msg.
+ */
+ ul_mem_length = ul_shm_length + ul_msg_length;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ if (!status) {
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ status =
+ cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
+#else
+ status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
+ &shm0_end);
+#endif
+ if (status)
+ status = -EFAULT;
+ }
+ if (!status) {
+ status =
+ cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
+ if (status)
+ status = -EFAULT;
+ }
+ if (!status) {
+ status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
+ if (status)
+ status = -EFAULT;
+ }
+ if (!status) {
+ /* Get memory reserved in host resources */
+ (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
+ &hio_mgr->ext_proc_info,
+ sizeof(struct
+ mgr_processorextinfo),
+ &num_procs);
+
+ /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
+ ndx = 0;
+ ul_gpp_pa = host_res->dw_mem_phys[1];
+ ul_gpp_va = host_res->dw_mem_base[1];
+ /* This is the virtual uncached ioremapped address!!! */
+ /* Why can't we directly take the DSPVA from the symbols? */
+ ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
+ ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
+ ul_seg1_size =
+ (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
+ /* 4K align */
+ ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
+ /* 64K align */
+ ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
+ ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
+ UL_PAGE_ALIGN_SIZE);
+ if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
+ ul_pad_size = 0x0;
+
+ dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
+ "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
+ "ul_seg_size %x ul_seg1_size %x \n", __func__,
+ ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
+ ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
+
+ if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
+ host_res->dw_mem_length[1]) {
+ pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
+ __func__, host_res->dw_mem_length[1],
+ ul_seg_size + ul_seg1_size + ul_pad_size);
+ status = -ENOMEM;
+ }
+ }
+ if (status)
+ goto func_end;
+
+ sm_sg->seg1_pa = ul_gpp_pa;
+ sm_sg->seg1_da = ul_dyn_ext_base;
+ sm_sg->seg1_va = ul_gpp_va;
+ sm_sg->seg1_size = ul_seg1_size;
+ sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
+ sm_sg->seg0_da = ul_dsp_va;
+ sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
+ sm_sg->seg0_size = ul_seg_size;
+
+ /*
+ * Copy remaining entries from CDB. All entries are 1 MB and
+ * should not conflict with shm entries on MPU or DSP side.
+ */
+ for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
+ if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
+ continue;
+
+ if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
+ ul_gpp_pa - 0x100000
+ && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
+ ul_gpp_pa + ul_seg_size)
+ || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt >
+ ul_dsp_va - 0x100000 / hio_mgr->word_size
+ && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <=
+ ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
+ dev_dbg(bridge,
+ "CDB MMU entry %d conflicts with "
+ "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
+ "GppPa %x, DspVa %x, Bytes %x.\n", i,
+ hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
+ hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt,
+ ul_gpp_pa, ul_dsp_va, ul_seg_size);
+ status = -EPERM;
+ } else {
+ if (ndx < MAX_LOCK_TLB_ENTRIES) {
+ ae_proc[ndx].ul_dsp_va =
+ hio_mgr->ext_proc_info.ty_tlb[i].
+ ul_dsp_virt;
+ ae_proc[ndx].ul_gpp_pa =
+ hio_mgr->ext_proc_info.ty_tlb[i].
+ ul_gpp_phys;
+ ae_proc[ndx].ul_gpp_va = 0;
+ /* 1 MB */
+ ae_proc[ndx].ul_size = 0x100000;
+ dev_dbg(bridge, "shm MMU entry PA %x "
+ "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
+ ae_proc[ndx].ul_dsp_va);
+ ndx++;
+ }
+ }
+ if (status)
+ goto func_end;
+ }
+
+ for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
+ ae_proc[i].ul_dsp_va = 0;
+ ae_proc[i].ul_gpp_pa = 0;
+ ae_proc[i].ul_gpp_va = 0;
+ ae_proc[i].ul_size = 0;
+ }
+ /*
+ * Set the shm physical address entry (grayed out in CDB file)
+ * to the virtual uncached ioremapped address of shm reserved
+ * on MPU.
+ */
+ hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size);
+
+ /*
+ * Need shm Phys addr. IO supports only one DSP for now:
+ * num_procs = 1.
+ */
+ if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
+ status = -EFAULT;
+ goto func_end;
+ } else {
+ if (sm_sg->seg0_da > ul_shm_base) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* ul_shm_base may not be at ul_dsp_va address */
+ ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
+ hio_mgr->word_size;
+ /*
+ * bridge_dev_ctrl() will set dev context dsp-mmu info. In
+ * bridge_brd_start() the MMU will be re-programmed with MMU
+ * DSPVa-GPPPa pair info while DSP is in a known
+ * (reset) state.
+ */
+
+ status =
+ hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context,
+ BRDIOCTL_SETMMUCONFIG,
+ ae_proc);
+ if (status)
+ goto func_end;
+ ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ ul_shm_base += ul_shm_base_offset;
+ ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
+ ul_mem_length);
+ if (ul_shm_base == 0) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* Register SM */
+ status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
+ }
+
+ hio_mgr->shared_mem = (struct shm *)ul_shm_base;
+ hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
+ hio_mgr->output = hio_mgr->input + (ul_shm_length -
+ sizeof(struct shm)) / 2;
+ hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input;
+
+ /* Set up Shared memory addresses for messaging. */
+ hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
+ + ul_shm_length);
+ hio_mgr->msg_input =
+ (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
+ hio_mgr->msg_output_ctrl =
+ (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
+ ul_msg_length / 2);
+ hio_mgr->msg_output =
+ (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
+ hmsg_mgr->max_msgs =
+ ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
+ / sizeof(struct msg_dspmsg);
+ dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
+ "output %p, msg_input_ctrl %p, msg_input %p, "
+ "msg_output_ctrl %p, msg_output %p\n",
+ (u8 *) hio_mgr->shared_mem, hio_mgr->input,
+ hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
+ hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
+ hio_mgr->msg_output);
+ dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
+ hmsg_mgr->max_msgs);
+ memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
+
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ /* Get the start address of trace buffer */
+ status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
+ &hio_mgr->ul_trace_buffer_begin);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+ (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
+ /* Get the end address of trace buffer */
+ status = cod_get_sym_value(cod_man, SYS_PUTCEND,
+ &hio_mgr->ul_trace_buffer_end);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hio_mgr->ul_trace_buffer_end =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+ (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
+ /* Get the current address of DSP write pointer */
+ status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
+ &hio_mgr->ul_trace_buffer_current);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hio_mgr->ul_trace_buffer_current =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+ (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
+ /* Calculate the size of trace buffer */
+ kfree(hio_mgr->pmsg);
+ hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_trace_buffer_begin) *
+ hio_mgr->word_size) + 2, GFP_KERNEL);
+ if (!hio_mgr->pmsg)
+ status = -ENOMEM;
+
+ hio_mgr->ul_dsp_va = ul_dsp_va;
+ hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
+
+#endif
+func_end:
+ return status;
+}
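+
+/*
+ * Editor's illustrative sketch, not part of this patch: the segment sizing
+ * above rounds up to a power-of-two boundary with the classic
+ * (x + align - 1) & ~(align - 1) idiom (0xFFF for 4K, 0xFFFF for 64K).
+ * In generic form:
+ */
+static inline u32 round_up_pow2(u32 x, u32 align)
+{
+ /* align must be a power of two */
+ return (x + align - 1) & ~(align - 1);
+}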
+
+/*
+ * ======== io_buf_size ========
+ * Size of shared memory I/O channel.
+ */
+u32 io_buf_size(struct io_mgr *hio_mgr)
+{
+ if (hio_mgr)
+ return hio_mgr->usm_buf_size;
+ else
+ return 0;
+}
+
+/*
+ * ======== io_cancel_chnl ========
+ * Cancel IO on a given PCPY channel.
+ */
+void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
+{
+ struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
+ struct shm *sm;
+
+ if (!hio_mgr)
+ goto func_end;
+ sm = hio_mgr->shared_mem;
+
+ /* Inform DSP that we have no more buffers on this channel */
+ set_chnl_free(sm, chnl);
+
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+func_end:
+ return;
+}
+
+
+/*
+ * ======== io_dispatch_pm ========
+ * Performs I/O dispatch on PM-related messages from the DSP.
+ */
+static void io_dispatch_pm(struct io_mgr *pio_mgr)
+{
+ int status;
+ u32 parg[2];
+
+ /* Perform Power message processing here */
+ parg[0] = pio_mgr->intr_val;
+
+ /* Send the command to the Bridge clk/pwr manager to handle */
+ if (parg[0] == MBX_PM_HIBERNATE_EN) {
+ dev_dbg(bridge, "PM: Hibernate command\n");
+ status = pio_mgr->intf_fxns->
+ pfn_dev_cntrl(pio_mgr->hbridge_context,
+ BRDIOCTL_PWR_HIBERNATE, parg);
+ if (status)
+ pr_err("%s: hibernate cmd failed 0x%x\n",
+ __func__, status);
+ } else if (parg[0] == MBX_PM_OPP_REQ) {
+ parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
+ dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
+ status = pio_mgr->intf_fxns->
+ pfn_dev_cntrl(pio_mgr->hbridge_context,
+ BRDIOCTL_CONSTRAINT_REQUEST, parg);
+ if (status)
+ dev_dbg(bridge, "PM: Failed to set constraint "
+ "= 0x%x\n", parg[1]);
+ } else {
+ dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
+ parg[0]);
+ status = pio_mgr->intf_fxns->
+ pfn_dev_cntrl(pio_mgr->hbridge_context,
+ BRDIOCTL_CLK_CTRL, parg);
+ if (status)
+ dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
+ "= 0x%x\n", *parg);
+ }
+}
+
+/*
+ * ======== io_dpc ========
+ * Deferred procedure call for shared memory channel driver ISR. Carries
+ * out the dispatch of I/O as a non-preemptible event. It can only be
+ * preempted by an ISR.
+ */
+void io_dpc(unsigned long ref_data)
+{
+ struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
+ struct chnl_mgr *chnl_mgr_obj;
+ struct msg_mgr *msg_mgr_obj;
+ struct deh_mgr *hdeh_mgr;
+ u32 requested;
+ u32 serviced;
+
+ if (!pio_mgr)
+ goto func_end;
+ chnl_mgr_obj = pio_mgr->hchnl_mgr;
+ dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
+ dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
+ if (!chnl_mgr_obj)
+ goto func_end;
+
+ requested = pio_mgr->dpc_req;
+ serviced = pio_mgr->dpc_sched;
+
+ if (serviced == requested)
+ goto func_end;
+
+ /* Process pending DPC's */
+ do {
+ /* Check value of interrupt reg to ensure it's a valid error */
+ if ((pio_mgr->intr_val > DEH_BASE) &&
+ (pio_mgr->intr_val < DEH_LIMIT)) {
+ /* Notify DSP/BIOS exception */
+ if (hdeh_mgr) {
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+ print_dsp_debug_trace(pio_mgr);
+#endif
+ bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
+ pio_mgr->intr_val);
+ }
+ }
+ /* Proc-copy channel dispatch */
+ input_chnl(pio_mgr, NULL, IO_SERVICE);
+ output_chnl(pio_mgr, NULL, IO_SERVICE);
+
+#ifdef CHNL_MESSAGES
+ if (msg_mgr_obj) {
+ /* Perform I/O dispatch on message queues */
+ input_msg(pio_mgr, msg_mgr_obj);
+ output_msg(pio_mgr, msg_mgr_obj);
+ }
+
+#endif
+#ifdef CONFIG_TIDSPBRIDGE_DEBUG
+ if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
+ /* Notify DSP Trace message */
+ print_dsp_debug_trace(pio_mgr);
+ }
+#endif
+ serviced++;
+ } while (serviced != requested);
+ pio_mgr->dpc_sched = requested;
+func_end:
+ return;
+}
+
+/*
+ * ======== io_mbox_msg ========
+ * Main interrupt handler for the shared memory IO manager.
+ * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
+ * schedules a DPC to dispatch I/O.
+ */
+void io_mbox_msg(u32 msg)
+{
+ struct io_mgr *pio_mgr;
+ struct dev_object *dev_obj;
+ unsigned long flags;
+
+ dev_obj = dev_get_first();
+ dev_get_io_mgr(dev_obj, &pio_mgr);
+
+ if (!pio_mgr)
+ return;
+
+ pio_mgr->intr_val = (u16)msg;
+ if (pio_mgr->intr_val & MBX_PM_CLASS)
+ io_dispatch_pm(pio_mgr);
+
+ if (pio_mgr->intr_val == MBX_DEH_RESET) {
+ pio_mgr->intr_val = 0;
+ } else {
+ spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
+ pio_mgr->dpc_req++;
+ spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
+ tasklet_schedule(&pio_mgr->dpc_tasklet);
+ }
+ return;
+}
+
+/*
+ * ======== io_request_chnl ========
+ * Purpose:
+ * Request channel I/O from the DSP. Sets flags in shared memory, then
+ * interrupts the DSP.
+ */
+void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
+ u8 io_mode, u16 *mbx_val)
+{
+ struct chnl_mgr *chnl_mgr_obj;
+ struct shm *sm;
+
+ if (!pchnl || !mbx_val)
+ goto func_end;
+ chnl_mgr_obj = io_manager->hchnl_mgr;
+ sm = io_manager->shared_mem;
+ if (io_mode == IO_INPUT) {
+ /*
+ * Assertion fires if CHNL_AddIOReq() is called on a stream
+ * which was cancelled, or attached to a dead board.
+ */
+ DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
+ (pchnl->dw_state == CHNL_STATEEOS));
+ /* Indicate to the DSP we have a buffer available for input */
+ set_chnl_busy(sm, pchnl->chnl_id);
+ *mbx_val = MBX_PCPY_CLASS;
+ } else if (io_mode == IO_OUTPUT) {
+ /*
+ * This assertion fails if CHNL_AddIOReq() was called on a
+ * stream which was cancelled, or attached to a dead board.
+ */
+ DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
+ CHNL_STATEREADY);
+ /*
+ * Record the fact that we have a buffer available for
+ * output.
+ */
+ chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
+ } else {
+ DBC_ASSERT(io_mode); /* Shouldn't get here. */
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== iosm_schedule ========
+ * Schedule DPC for IO.
+ */
+void iosm_schedule(struct io_mgr *io_manager)
+{
+ unsigned long flags;
+
+ if (!io_manager)
+ return;
+
+ /* Increment count of DPC's pending. */
+ spin_lock_irqsave(&io_manager->dpc_lock, flags);
+ io_manager->dpc_req++;
+ spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
+
+ /* Schedule DPC */
+ tasklet_schedule(&io_manager->dpc_tasklet);
+}
+
+/*
+ * ======== find_ready_output ========
+ * Search for a host output channel which is ready to send. If this is
+ * called as a result of servicing the DPC, then implement a round
+ * robin search; otherwise, this was called by a client thread (via
+ * IO_Dispatch()), so just start searching from the current channel id.
+ */
+static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
+ struct chnl_object *pchnl, u32 mask)
+{
+ u32 ret = OUTPUTNOTREADY;
+ u32 id, start_id;
+ u32 shift;
+
+ id = (pchnl !=
+ NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1));
+ id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
+ if (id >= CHNL_MAXCHANNELS)
+ goto func_end;
+ if (mask) {
+ shift = (1 << id);
+ start_id = id;
+ do {
+ if (mask & shift) {
+ ret = id;
+ if (pchnl == NULL)
+ chnl_mgr_obj->dw_last_output = id;
+ break;
+ }
+ id = id + 1;
+ id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
+ shift = (1 << id);
+ } while (id != start_id);
+ }
+func_end:
+ return ret;
+}
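+
+/*
+ * Editor's illustrative sketch, not part of this patch: the scan above is a
+ * round-robin search over a readiness bitmask. The same idea as a
+ * stand-alone (hypothetical) helper, for nbits <= 31:
+ */
+static u32 __maybe_unused next_ready_id(u32 mask, u32 last, u32 nbits)
+{
+ u32 id = (last + 1) % nbits;
+ u32 start = id;
+
+ do {
+ if (mask & (1 << id))
+ return id;
+ id = (id + 1) % nbits;
+ } while (id != start);
+
+ return OUTPUTNOTREADY;
+}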
+
+/*
+ * ======== input_chnl ========
+ * Dispatch a buffer on an input channel.
+ */
+static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode)
+{
+ struct chnl_mgr *chnl_mgr_obj;
+ struct shm *sm;
+ u32 chnl_id;
+ u32 bytes;
+ struct chnl_irp *chnl_packet_obj = NULL;
+ u32 dw_arg;
+ bool clear_chnl = false;
+ bool notify_client = false;
+
+ sm = pio_mgr->shared_mem;
+ chnl_mgr_obj = pio_mgr->hchnl_mgr;
+
+ /* Attempt to perform input */
+ if (!sm->input_full)
+ goto func_end;
+
+ bytes = sm->input_size * chnl_mgr_obj->word_size;
+ chnl_id = sm->input_id;
+ dw_arg = sm->arg;
+ if (chnl_id >= CHNL_MAXCHANNELS) {
+ /* Shouldn't be here: would indicate corrupted shm. */
+ DBC_ASSERT(chnl_id);
+ goto func_end;
+ }
+ pchnl = chnl_mgr_obj->ap_channel[chnl_id];
+ if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
+ if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
+ if (!pchnl->pio_requests)
+ goto func_end;
+ /* Get the I/O request, and attempt a transfer */
+ chnl_packet_obj = (struct chnl_irp *)
+ lst_get_head(pchnl->pio_requests);
+ if (chnl_packet_obj) {
+ pchnl->cio_reqs--;
+ if (pchnl->cio_reqs < 0)
+ goto func_end;
+ /*
+ * Ensure we don't overflow the client's
+ * buffer.
+ */
+ bytes = min(bytes, chnl_packet_obj->byte_size);
+ memcpy(chnl_packet_obj->host_sys_buf,
+ pio_mgr->input, bytes);
+ pchnl->bytes_moved += bytes;
+ chnl_packet_obj->byte_size = bytes;
+ chnl_packet_obj->dw_arg = dw_arg;
+ chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
+
+ if (bytes == 0) {
+ /*
+ * This assertion fails if the DSP
+ * sends EOS more than once on this
+ * channel.
+ */
+ if (pchnl->dw_state & CHNL_STATEEOS)
+ goto func_end;
+ /*
+ * Zero bytes indicates EOS. Update
+ * IOC status for this chirp, and also
+ * the channel state.
+ */
+ chnl_packet_obj->status |=
+ CHNL_IOCSTATEOS;
+ pchnl->dw_state |= CHNL_STATEEOS;
+ /*
+ * Notify that end of stream has
+ * occurred.
+ */
+ ntfy_notify(pchnl->ntfy_obj,
+ DSP_STREAMDONE);
+ }
+ /* Tell DSP if no more I/O buffers available */
+ if (!pchnl->pio_requests)
+ goto func_end;
+ if (LST_IS_EMPTY(pchnl->pio_requests))
+ set_chnl_free(sm, pchnl->chnl_id);
+ clear_chnl = true;
+ notify_client = true;
+ } else {
+ /*
+ * Input full for this channel, but we have no
+ * buffers available. The channel must be
+ * "idling". Clear out the physical input
+ * channel.
+ */
+ clear_chnl = true;
+ }
+ } else {
+ /* Input channel cancelled: clear input channel */
+ clear_chnl = true;
+ }
+ } else {
+ /* DPC fired after host closed channel: clear input channel */
+ clear_chnl = true;
+ }
+ if (clear_chnl) {
+ /* Indicate to the DSP we have read the input */
+ sm->input_full = 0;
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ }
+ if (notify_client) {
+ /* Notify client with IO completion record */
+ notify_chnl_complete(pchnl, chnl_packet_obj);
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== input_msg ========
+ * Copies messages from shared memory to the message queues.
+ */
+static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
+{
+ u32 num_msgs;
+ u32 i;
+ u8 *msg_input;
+ struct msg_queue *msg_queue_obj;
+ struct msg_frame *pmsg;
+ struct msg_dspmsg msg;
+ struct msg_ctrl *msg_ctr_obj;
+ u32 input_empty;
+ u32 addr;
+
+ msg_ctr_obj = pio_mgr->msg_input_ctrl;
+ /* Get the number of input messages to be read */
+ input_empty = msg_ctr_obj->buf_empty;
+ num_msgs = msg_ctr_obj->size;
+ if (input_empty)
+ goto func_end;
+
+ msg_input = pio_mgr->msg_input;
+ for (i = 0; i < num_msgs; i++) {
+ /* Read the next message */
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
+ msg.msg.dw_cmd =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
+ msg.msg.dw_arg1 =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
+ msg.msg.dw_arg2 =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
+ msg.msgq_id =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ msg_input += sizeof(struct msg_dspmsg);
+ if (!hmsg_mgr->queue_list)
+ goto func_end;
+
+ /* Determine which queue to put the message in */
+ msg_queue_obj =
+ (struct msg_queue *)lst_first(hmsg_mgr->queue_list);
+ dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
+ "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
+ msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
+ /*
+ * Interrupt may occur before shared memory and message
+ * input locations have been set up. If all nodes were
+ * cleaned up, hmsg_mgr->max_msgs should be 0.
+ */
+ while (msg_queue_obj != NULL) {
+ if (msg.msgq_id == msg_queue_obj->msgq_id) {
+ /* Found it */
+ if (msg.msg.dw_cmd == RMS_EXITACK) {
+ /*
+ * Call the node exit notification.
+ * The exit message does not get
+ * queued.
+ */
+ (*hmsg_mgr->on_exit) ((void *)
+ msg_queue_obj->arg,
+ msg.msg.dw_arg1);
+ } else {
+ /*
+ * Not an exit acknowledgement, queue
+ * the message.
+ */
+ if (!msg_queue_obj->msg_free_list)
+ goto func_end;
+ pmsg = (struct msg_frame *)lst_get_head
+ (msg_queue_obj->msg_free_list);
+ if (msg_queue_obj->msg_used_list
+ && pmsg) {
+ pmsg->msg_data = msg;
+ lst_put_tail
+ (msg_queue_obj->msg_used_list,
+ (struct list_head *)pmsg);
+ ntfy_notify
+ (msg_queue_obj->ntfy_obj,
+ DSP_NODEMESSAGEREADY);
+ sync_set_event
+ (msg_queue_obj->sync_event);
+ } else {
+ /*
+ * No free frame to copy the
+ * message into.
+ */
+ pr_err("%s: no free msg frames,"
+ " discarding msg\n",
+ __func__);
+ }
+ }
+ break;
+ }
+
+ if (!hmsg_mgr->queue_list || !msg_queue_obj)
+ goto func_end;
+ msg_queue_obj =
+ (struct msg_queue *)lst_next(hmsg_mgr->queue_list,
+ (struct list_head *)
+ msg_queue_obj);
+ }
+ }
+ /* Set the post SWI flag */
+ if (num_msgs > 0) {
+ /* Tell the DSP we've read the messages */
+ msg_ctr_obj->buf_empty = true;
+ msg_ctr_obj->post_swi = true;
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== notify_chnl_complete ========
+ * Purpose:
+ * Signal the channel event, notifying the client that I/O has completed.
+ */
+static void notify_chnl_complete(struct chnl_object *pchnl,
+ struct chnl_irp *chnl_packet_obj)
+{
+ bool signal_event;
+
+ if (!pchnl || !pchnl->sync_event ||
+ !pchnl->pio_completions || !chnl_packet_obj)
+ goto func_end;
+
+ /*
+ * Note: we signal the channel event only if the queue of IO
+ * completions is empty. If it is not empty, the event is sure to be
+ * signalled by the only IO completion list consumer:
+ * bridge_chnl_get_ioc().
+ */
+ signal_event = LST_IS_EMPTY(pchnl->pio_completions);
+ /* Enqueue the IO completion info for the client */
+ lst_put_tail(pchnl->pio_completions,
+ (struct list_head *)chnl_packet_obj);
+ pchnl->cio_cs++;
+
+ if (pchnl->cio_cs > pchnl->chnl_packets)
+ goto func_end;
+ /* Signal the channel event (if not already set) that IO is complete */
+ if (signal_event)
+ sync_set_event(pchnl->sync_event);
+
+ /* Notify that IO is complete */
+ ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
+func_end:
+ return;
+}
+
+/*
+ * ======== output_chnl ========
+ * Purpose:
+ * Dispatch a buffer on an output channel.
+ */
+static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode)
+{
+ struct chnl_mgr *chnl_mgr_obj;
+ struct shm *sm;
+ u32 chnl_id;
+ struct chnl_irp *chnl_packet_obj;
+ u32 dw_dsp_f_mask;
+
+ chnl_mgr_obj = pio_mgr->hchnl_mgr;
+ sm = pio_mgr->shared_mem;
+ /* Attempt to perform output */
+ if (sm->output_full)
+ goto func_end;
+
+ if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
+ goto func_end;
+
+ /* Look to see if both a PC and DSP output channel are ready */
+ dw_dsp_f_mask = sm->dsp_free_mask;
+ chnl_id =
+ find_ready_output(chnl_mgr_obj, pchnl,
+ (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
+ if (chnl_id == OUTPUTNOTREADY)
+ goto func_end;
+
+ pchnl = chnl_mgr_obj->ap_channel[chnl_id];
+ if (!pchnl || !pchnl->pio_requests) {
+ /* Shouldn't get here */
+ goto func_end;
+ }
+ /* Get the I/O request, and attempt a transfer */
+ chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
+ if (!chnl_packet_obj)
+ goto func_end;
+
+ pchnl->cio_reqs--;
+ if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
+ goto func_end;
+
+ /* Record fact that no more I/O buffers available */
+ if (LST_IS_EMPTY(pchnl->pio_requests))
+ chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+
+ /* Transfer buffer to DSP side */
+ chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size,
+ chnl_packet_obj->byte_size);
+ memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
+ chnl_packet_obj->byte_size);
+ pchnl->bytes_moved += chnl_packet_obj->byte_size;
+ /* Write all 32 bits of arg */
+ sm->arg = chnl_packet_obj->dw_arg;
+#if _CHNL_WORDSIZE == 2
+ /* Access can be different SM access word size (e.g. 16/32 bit words) */
+ sm->output_id = (u16) chnl_id;
+ sm->output_size = (u16) (chnl_packet_obj->byte_size +
+ chnl_mgr_obj->word_size - 1) /
+ (u16) chnl_mgr_obj->word_size;
+#else
+ sm->output_id = chnl_id;
+ sm->output_size = (chnl_packet_obj->byte_size +
+ chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
+#endif
+ sm->output_full = 1;
+ /* Indicate to the DSP we have written the output */
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ /* Notify client with IO completion record (keep EOS) */
+ chnl_packet_obj->status &= CHNL_IOCSTATEOS;
+ notify_chnl_complete(pchnl, chnl_packet_obj);
+ /* Notify if stream is done. */
+ if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
+ ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
+
+func_end:
+ return;
+}
+
+/*
+ * ======== output_msg ========
+ * Copies messages from the message queues to the shared memory.
+ */
+static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
+{
+ u32 num_msgs = 0;
+ u32 i;
+ u8 *msg_output;
+ struct msg_frame *pmsg;
+ struct msg_ctrl *msg_ctr_obj;
+ u32 output_empty;
+ u32 val;
+ u32 addr;
+
+ msg_ctr_obj = pio_mgr->msg_output_ctrl;
+
+ /* Check if output has been cleared */
+ output_empty = msg_ctr_obj->buf_empty;
+ if (output_empty) {
+ num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
+ hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
+ msg_output = pio_mgr->msg_output;
+ /* Copy num_msgs messages into shared memory */
+ for (i = 0; i < num_msgs; i++) {
+ if (!hmsg_mgr->msg_used_list) {
+ pmsg = NULL;
+ goto func_end;
+ } else {
+ pmsg = (struct msg_frame *)
+ lst_get_head(hmsg_mgr->msg_used_list);
+ }
+ if (pmsg != NULL) {
+ val = (pmsg->msg_data).msgq_id;
+ addr = (u32) &(((struct msg_dspmsg *)
+ msg_output)->msgq_id);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ val = (pmsg->msg_data).msg.dw_cmd;
+ addr = (u32) &((((struct msg_dspmsg *)
+ msg_output)->msg).dw_cmd);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ val = (pmsg->msg_data).msg.dw_arg1;
+ addr = (u32) &((((struct msg_dspmsg *)
+ msg_output)->msg).dw_arg1);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ val = (pmsg->msg_data).msg.dw_arg2;
+ addr = (u32) &((((struct msg_dspmsg *)
+ msg_output)->msg).dw_arg2);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ msg_output += sizeof(struct msg_dspmsg);
+ if (!hmsg_mgr->msg_free_list)
+ goto func_end;
+ lst_put_tail(hmsg_mgr->msg_free_list,
+ (struct list_head *)pmsg);
+ sync_set_event(hmsg_mgr->sync_event);
+ }
+ }
+
+ if (num_msgs > 0) {
+ hmsg_mgr->msgs_pending -= num_msgs;
+#if _CHNL_WORDSIZE == 2
+ /*
+ * Access can be different SM access word size
+ * (e.g. 16/32 bit words)
+ */
+ msg_ctr_obj->size = (u16) num_msgs;
+#else
+ msg_ctr_obj->size = num_msgs;
+#endif
+ msg_ctr_obj->buf_empty = false;
+ /* Set the post SWI flag */
+ msg_ctr_obj->post_swi = true;
+ /* Tell the DSP we have written the output. */
+ sm_interrupt_dsp(pio_mgr->hbridge_context,
+ MBX_PCPY_CLASS);
+ }
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== register_shm_segs ========
+ * Purpose:
+ * Registers GPP SM segment with CMM.
+ */
+static int register_shm_segs(struct io_mgr *hio_mgr,
+ struct cod_manager *cod_man,
+ u32 dw_gpp_base_pa)
+{
+ int status = 0;
+ u32 ul_shm0_base = 0;
+ u32 shm0_end = 0;
+ u32 ul_shm0_rsrvd_start = 0;
+ u32 ul_rsrvd_size = 0;
+ u32 ul_gpp_phys;
+ u32 ul_dsp_virt;
+ u32 ul_shm_seg_id0 = 0;
+ u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
+
+ /*
+ * Read address and size info for first SM region.
+ * Get start of 1st SM Heap region.
+ */
+ status =
+ cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
+ if (ul_shm0_base == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* Get end of 1st SM Heap region */
+ if (!status) {
+ /* Get start and length of message part of shared memory */
+ status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
+ &shm0_end);
+ if (shm0_end == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ }
+ /* Start of Gpp reserved region */
+ if (!status) {
+ /* Get start and length of message part of shared memory */
+ status =
+ cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
+ &ul_shm0_rsrvd_start);
+ if (ul_shm0_rsrvd_start == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ }
+ /* Register with CMM */
+ if (!status) {
+ status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
+ if (!status) {
+ status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
+ CMM_ALLSEGMENTS);
+ }
+ }
+ /* Register new SM region(s) */
+ if (!status && (shm0_end - ul_shm0_base) > 0) {
+ /* Calc size (bytes) of SM the GPP can alloc from */
+ ul_rsrvd_size =
+ (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
+ if (ul_rsrvd_size <= 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* Calc size of SM DSP can alloc from */
+ ul_dsp_size =
+ (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
+ if (ul_dsp_size <= 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* First TLB entry reserved for Bridge SM use. */
+ ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ /* Get size in bytes */
+ ul_dsp_virt =
+ hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt *
+ hio_mgr->word_size;
+ /*
+ * Calc byte offset used to convert GPP phys <-> DSP byte
+ * address.
+ */
+ if (dw_gpp_base_pa > ul_dsp_virt)
+ dw_offset = dw_gpp_base_pa - ul_dsp_virt;
+ else
+ dw_offset = ul_dsp_virt - dw_gpp_base_pa;
+
+ if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /*
+ * Calc Gpp phys base of SM region.
+ * This is actually uncached kernel virtual address.
+ */
+ dw_gpp_base_va =
+ ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
+ ul_dsp_virt;
+ /*
+ * Calc Gpp phys base of SM region.
+ * This is the physical address.
+ */
+ dw_gpp_base_pa =
+ dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
+ ul_dsp_virt;
+ /* Register SM Segment 0. */
+ status =
+ cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
+ ul_rsrvd_size, dw_offset,
+ (dw_gpp_base_pa >
+ ul_dsp_virt) ? CMM_ADDTODSPPA :
+ CMM_SUBFROMDSPPA,
+ (u32) (ul_shm0_base *
+ hio_mgr->word_size),
+ ul_dsp_size, &ul_shm_seg_id0,
+ dw_gpp_base_va);
+ /* First SM region is seg_id = 1 */
+ if (ul_shm_seg_id0 != 1)
+ status = -EPERM;
+ }
+func_end:
+ return status;
+}
+
+/* ZCPY IO routines. */
+/*
+ * ======== io_sh_msetting ========
+ * Sets the requested shm setting.
+ */
+int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 i;
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ switch (desc) {
+ case SHM_CURROPP:
+ /* Update the shared memory with requested OPP information */
+ if (pargs != NULL)
+ hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
+ *(u32 *) pargs;
+ else
+ return -EPERM;
+ break;
+ case SHM_OPPINFO:
+ /*
+ * Update the shared memory with the voltage, frequency,
+ * min and max frequency values for an OPP.
+ */
+ for (i = 0; i <= dsp_max_opps; i++) {
+ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+ voltage = vdd1_dsp_freq[i][0];
+ dev_dbg(bridge, "OPP-shm: voltage: %d\n",
+ vdd1_dsp_freq[i][0]);
+ hio_mgr->shared_mem->opp_table_struct.
+ opp_point[i].frequency = vdd1_dsp_freq[i][1];
+ dev_dbg(bridge, "OPP-shm: frequency: %d\n",
+ vdd1_dsp_freq[i][1]);
+ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+ min_freq = vdd1_dsp_freq[i][2];
+ dev_dbg(bridge, "OPP-shm: min freq: %d\n",
+ vdd1_dsp_freq[i][2]);
+ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+ max_freq = vdd1_dsp_freq[i][3];
+ dev_dbg(bridge, "OPP-shm: max freq: %d\n",
+ vdd1_dsp_freq[i][3]);
+ }
+ hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
+ dsp_max_opps;
+ dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
+ /* Update the current OPP number */
+ if (pdata->dsp_get_opp)
+ i = (*pdata->dsp_get_opp) ();
+ hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
+ dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
+ break;
+ case SHM_GETOPP:
+ /* Get the OPP that DSP has requested */
+ *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
+ break;
+ default:
+ break;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * ======== bridge_io_get_proc_load ========
+ * Gets the Processor's Load information
+ */
+int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
+ struct dsp_procloadstat *proc_lstat)
+{
+ proc_lstat->curr_load =
+ hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
+ proc_lstat->predicted_load =
+ hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
+ proc_lstat->curr_dsp_freq =
+ hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
+ proc_lstat->predicted_freq =
+ hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
+
+ dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
+ "Pred Freq = %d\n", proc_lstat->curr_load,
+ proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
+ proc_lstat->predicted_freq);
+ return 0;
+}
+
+void io_sm_init(void)
+{
+ /* Do nothing */
+}
+
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+void print_dsp_debug_trace(struct io_mgr *hio_mgr)
+{
+ u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
+
+ while (true) {
+ /* Get the DSP current pointer */
+ ul_gpp_cur_pointer =
+ *(u32 *) (hio_mgr->ul_trace_buffer_current);
+ ul_gpp_cur_pointer =
+ hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
+ hio_mgr->ul_dsp_va);
+
+ /* No new debug messages available yet */
+ if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
+ break;
+ } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
+ /* Continuous data */
+ ul_new_message_length =
+ ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
+
+ memcpy(hio_mgr->pmsg,
+ (char *)hio_mgr->ul_gpp_read_pointer,
+ ul_new_message_length);
+ hio_mgr->pmsg[ul_new_message_length] = '\0';
+ /*
+ * Advance the GPP trace pointer to DSP current
+ * pointer.
+ */
+ hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
+ /* Print the trace messages */
+ pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
+ } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
+ /* Handle trace buffer wraparound */
+ memcpy(hio_mgr->pmsg,
+ (char *)hio_mgr->ul_gpp_read_pointer,
+ hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_gpp_read_pointer);
+ ul_new_message_length =
+ ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
+ memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_gpp_read_pointer],
+ (char *)hio_mgr->ul_trace_buffer_begin,
+ ul_new_message_length);
+ hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_gpp_read_pointer +
+ ul_new_message_length] = '\0';
+ /*
+ * Advance the GPP trace pointer to DSP current
+ * pointer.
+ */
+ hio_mgr->ul_gpp_read_pointer =
+ hio_mgr->ul_trace_buffer_begin +
+ ul_new_message_length;
+ /* Print the trace messages */
+ pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
+ }
+ }
+}
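The wraparound branch above is the standard two-copy read from a ring buffer. A standalone sketch of the same copy, with illustrative names, for a ring [begin, end) with a reader-owned read pointer and a writer-owned current pointer:

static size_t ring_read(char *dst, const char *begin, const char *end,
			const char **read_ptr, const char *cur)
{
	size_t n;

	if (cur >= *read_ptr) {
		/* Contiguous data: one copy */
		n = cur - *read_ptr;
		memcpy(dst, *read_ptr, n);
	} else {
		/* Wrapped: copy the tail, then the head */
		size_t tail = end - *read_ptr;

		memcpy(dst, *read_ptr, tail);
		memcpy(dst + tail, begin, cur - begin);
		n = tail + (cur - begin);
	}
	*read_ptr = cur;	/* advance reader to writer's position */
	return n;
}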
+#endif
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/*
+ * ======== print_dsp_trace_buffer ========
+ * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
+ * Parameters:
+ *      hbridge_context:        Handle to the bridge device context.
+ * Returns:
+ *      0:              Success.
+ *      -ENOMEM:        Unable to allocate memory.
+ * Requires:
+ *      hbridge_context must be valid. Checked in bridge_deh_notify.
+ */
+int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
+{
+ int status = 0;
+ struct cod_manager *cod_mgr;
+ u32 ul_trace_end;
+ u32 ul_trace_begin;
+ u32 trace_cur_pos;
+ u32 ul_num_bytes = 0;
+ u32 ul_num_words = 0;
+ u32 ul_word_size = 2;
+ char *psz_buf;
+ char *str_beg;
+ char *trace_end;
+ char *buf_end;
+ char *new_line;
+
+ struct bridge_dev_context *pbridge_context = hbridge_context;
+ struct bridge_drv_interface *intf_fxns;
+ struct dev_object *dev_obj = (struct dev_object *)
+ pbridge_context->hdev_obj;
+
+ status = dev_get_cod_mgr(dev_obj, &cod_mgr);
+
+ if (cod_mgr) {
+ /* Look for SYS_PUTCBEG/SYS_PUTCEND */
+ status =
+ cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
+ } else {
+ status = -EFAULT;
+ }
+ if (!status)
+ status =
+ cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
+
+ if (!status)
+ /* trace_cur_pos will hold the address of a DSP pointer */
+ status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
+ &trace_cur_pos);
+
+ if (status)
+ goto func_end;
+
+ ul_num_bytes = (ul_trace_end - ul_trace_begin);
+
+ ul_num_words = ul_num_bytes * ul_word_size;
+ status = dev_get_intf_fxns(dev_obj, &intf_fxns);
+
+ if (status)
+ goto func_end;
+
+ psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
+ if (psz_buf != NULL) {
+ /* Read trace buffer data */
+ status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+ (u8 *)psz_buf, (u32)ul_trace_begin,
+ ul_num_bytes, 0);
+
+ if (status)
+ goto func_end;
+
+ /* Pack and do newline conversion */
+ pr_debug("PrintDspTraceBuffer: "
+ "before pack and unpack.\n");
+ pr_debug("%s: DSP Trace Buffer Begin:\n"
+ "=======================\n%s\n",
+ __func__, psz_buf);
+
+ /* Read the value at the DSP address in trace_cur_pos. */
+ status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+ (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
+ 4, 0);
+ if (status)
+ goto func_end;
+ /* Pack and do newline conversion */
+ pr_info("DSP Trace Buffer Begin:\n"
+ "=======================\n%s\n",
+ psz_buf);
+
+
+ /* convert to offset */
+ trace_cur_pos = trace_cur_pos - ul_trace_begin;
+
+ if (ul_num_bytes) {
+			/*
+			 * The buffer is not full; find the end of the
+			 * data -- buf_end will be >= psz_buf after the
+			 * while loop.
+			 */
+ buf_end = &psz_buf[ul_num_bytes+1];
+ /* DSP print position */
+ trace_end = &psz_buf[trace_cur_pos];
+
+ /*
+ * Search buffer for a new_line and replace it
+ * with '\0', then print as string.
+ * Continue until end of buffer is reached.
+ */
+ str_beg = trace_end;
+ ul_num_bytes = buf_end - str_beg;
+
+ while (str_beg < buf_end) {
+ new_line = strnchr(str_beg, ul_num_bytes,
+ '\n');
+ if (new_line && new_line < buf_end) {
+ *new_line = 0;
+ pr_debug("%s\n", str_beg);
+ str_beg = ++new_line;
+ ul_num_bytes = buf_end - str_beg;
+ } else {
+ /*
+ * Assume buffer empty if it contains
+ * a zero
+ */
+ if (*str_beg != '\0') {
+ str_beg[ul_num_bytes] = 0;
+ pr_debug("%s\n", str_beg);
+ }
+ str_beg = buf_end;
+ ul_num_bytes = 0;
+ }
+ }
+ /*
+			 * Search buffer for a newline and replace it
+ * with '\0', then print as string.
+ * Continue until buffer is exhausted.
+ */
+ str_beg = psz_buf;
+ ul_num_bytes = trace_end - str_beg;
+
+ while (str_beg < trace_end) {
+ new_line = strnchr(str_beg, ul_num_bytes, '\n');
+ if (new_line != NULL && new_line < trace_end) {
+ *new_line = 0;
+ pr_debug("%s\n", str_beg);
+ str_beg = ++new_line;
+ ul_num_bytes = trace_end - str_beg;
+ } else {
+ /*
+ * Assume buffer empty if it contains
+ * a zero
+ */
+ if (*str_beg != '\0') {
+ str_beg[ul_num_bytes] = 0;
+ pr_debug("%s\n", str_beg);
+ }
+ str_beg = trace_end;
+ ul_num_bytes = 0;
+ }
+ }
+ }
+ pr_info("\n=======================\n"
+ "DSP Trace Buffer End:\n");
+ kfree(psz_buf);
+ } else {
+ status = -ENOMEM;
+ }
+func_end:
+ if (status)
+ dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
+ return status;
+}
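The two tokenization loops above share one pattern: split a bounded, not necessarily NUL-terminated region on '\n' and print each piece. A condensed sketch of that pattern (it assumes, like the code above, that the caller allocated a spare byte past end for the final terminator):

static void print_region_by_line(char *beg, char *end)
{
	while (beg < end) {
		char *nl = strnchr(beg, end - beg, '\n');

		if (nl && nl < end) {
			*nl = '\0';
			pr_debug("%s\n", beg);
			beg = nl + 1;
		} else {
			/* No newline left; print the remainder, if any */
			if (*beg != '\0') {
				*end = '\0';	/* spare byte assumed */
				pr_debug("%s\n", beg);
			}
			break;
		}
	}
}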
+
+/**
+ * dump_dsp_stack() - This function dumps the data on the DSP stack.
+ * @bridge_context: Bridge driver's device context pointer.
+ *
+ */
+int dump_dsp_stack(struct bridge_dev_context *bridge_context)
+{
+ int status = 0;
+ struct cod_manager *code_mgr;
+ struct node_mgr *node_mgr;
+ u32 trace_begin;
+ char name[256];
+ struct {
+ u32 head[2];
+ u32 size;
+ } mmu_fault_dbg_info;
+ u32 *buffer;
+ u32 *buffer_beg;
+ u32 *buffer_end;
+ u32 exc_type;
+ u32 dyn_ext_base;
+ u32 i;
+ u32 offset_output;
+ u32 total_size;
+ u32 poll_cnt;
+ const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
+ "IRP", "NRP", "AMR", "SSR",
+ "ILC", "RILC", "IER", "CSR"};
+ const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
+ struct bridge_drv_interface *intf_fxns;
+ struct dev_object *dev_object = bridge_context->hdev_obj;
+
+ status = dev_get_cod_mgr(dev_object, &code_mgr);
+ if (!code_mgr) {
+ pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
+ status = -EFAULT;
+ }
+
+ if (!status) {
+ status = dev_get_node_manager(dev_object, &node_mgr);
+ if (!node_mgr) {
+ pr_debug("%s: Failed on dev_get_node_manager.\n",
+ __func__);
+ status = -EFAULT;
+ }
+ }
+
+ if (!status) {
+ /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
+ status =
+ cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
+ pr_debug("%s: trace_begin Value 0x%x\n",
+ __func__, trace_begin);
+ if (status)
+ pr_debug("%s: Failed on cod_get_sym_value.\n",
+ __func__);
+ }
+ if (!status)
+ status = dev_get_intf_fxns(dev_object, &intf_fxns);
+ /*
+ * Check for the "magic number" in the trace buffer. If it has
+ * yet to appear then poll the trace buffer to wait for it. Its
+ * appearance signals that the DSP has finished dumping its state.
+ */
+ mmu_fault_dbg_info.head[0] = 0;
+ mmu_fault_dbg_info.head[1] = 0;
+ if (!status) {
+ poll_cnt = 0;
+ while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
+ mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
+ poll_cnt < POLL_MAX) {
+
+ /* Read DSP dump size from the DSP trace buffer... */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context,
+ (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
+ sizeof(mmu_fault_dbg_info), 0);
+
+ if (status)
+ break;
+
+ poll_cnt++;
+ }
+
+		if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
+		    mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
+			status = -ETIME;
+			pr_err("%s: No DSP MMU-Fault information available.\n",
+ __func__);
+ }
+ }
+
+ if (!status) {
+ total_size = mmu_fault_dbg_info.size;
+ /* Limit the size in case DSP went crazy */
+ if (total_size > MAX_MMU_DBGBUFF)
+ total_size = MAX_MMU_DBGBUFF;
+
+ buffer = kzalloc(total_size, GFP_ATOMIC);
+ if (!buffer) {
+ status = -ENOMEM;
+ pr_debug("%s: Failed to "
+ "allocate stack dump buffer.\n", __func__);
+ goto func_end;
+ }
+
+ buffer_beg = buffer;
+ buffer_end = buffer + total_size / 4;
+
+ /* Read bytes from the DSP trace buffer... */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context,
+ (u8 *)buffer, (u32)trace_begin,
+ total_size, 0);
+ if (status) {
+ pr_debug("%s: Failed to Read Trace Buffer.\n",
+ __func__);
+ goto func_end;
+ }
+
+		pr_err("\nApproximate Crash Position:\n"
+ "--------------------------\n");
+
+ exc_type = buffer[3];
+ if (!exc_type)
+ i = buffer[79]; /* IRP */
+ else
+ i = buffer[80]; /* NRP */
+
+ status =
+ cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
+ 0x1000, &offset_output, name) == 0))
+ pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
+ i - offset_output);
+ else
+ pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
+
+ buffer += 4;
+
+ pr_err("\nExecution Info:\n"
+ "---------------\n");
+
+ if (*buffer < ARRAY_SIZE(exec_ctxt)) {
+ pr_err("Execution context \t%s\n",
+ exec_ctxt[*buffer++]);
+ } else {
+ pr_err("Execution context corrupt\n");
+ kfree(buffer_beg);
+ return -EFAULT;
+ }
+ pr_err("Task Handle\t\t0x%x\n", *buffer++);
+ pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
+ pr_err("Stack Top\t\t0x%x\n", *buffer++);
+ pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
+ pr_err("Stack Size\t\t0x%x\n", *buffer++);
+ pr_err("Stack Size In Use\t0x%x\n", *buffer++);
+
+ pr_err("\nCPU Registers\n"
+ "---------------\n");
+
+ for (i = 0; i < 32; i++) {
+ if (i == 4 || i == 6 || i == 8)
+ pr_err("A%d 0x%-8x [Function Argument %d]\n",
+ i, *buffer++, i-3);
+ else if (i == 15)
+ pr_err("A15 0x%-8x [Frame Pointer]\n",
+ *buffer++);
+ else
+ pr_err("A%d 0x%x\n", i, *buffer++);
+ }
+
+ pr_err("\nB0 0x%x\n", *buffer++);
+ pr_err("B1 0x%x\n", *buffer++);
+ pr_err("B2 0x%x\n", *buffer++);
+
+ if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
+ *buffer, 0x1000, &offset_output, name) == 0))
+
+ pr_err("B3 0x%-8x [Function Return Pointer:"
+ " \"%s\" + 0x%x]\n", *buffer, name,
+ *buffer - offset_output);
+ else
+ pr_err("B3 0x%-8x [Function Return Pointer:"
+ "Unable to match to a symbol.]\n", *buffer);
+
+ buffer++;
+
+ for (i = 4; i < 32; i++) {
+ if (i == 4 || i == 6 || i == 8)
+ pr_err("B%d 0x%-8x [Function Argument %d]\n",
+ i, *buffer++, i-2);
+ else if (i == 14)
+ pr_err("B14 0x%-8x [Data Page Pointer]\n",
+ *buffer++);
+ else
+ pr_err("B%d 0x%x\n", i, *buffer++);
+ }
+
+ pr_err("\n");
+
+ for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
+ pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
+
+ pr_err("\nStack:\n"
+ "------\n");
+
+ for (i = 0; buffer < buffer_end; i++, buffer++) {
+ if ((*buffer > dyn_ext_base) && (
+ node_find_addr(node_mgr, *buffer , 0x600,
+ &offset_output, name) == 0))
+ pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
+ i, *buffer, name,
+ *buffer - offset_output);
+ else
+ pr_err("[%d] 0x%x\n", i, *buffer);
+ }
+ kfree(buffer_beg);
+ }
+func_end:
+ return status;
+}
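For reference, the dump layout that the parsing above implies, expressed as a struct sketch. It is inferred from the word offsets in the code, not taken from a DSP header; offsets are in 32-bit words from the start of the dump:

struct dsp_mmu_fault_dump {	/* sketch only; word offsets noted */
	u32 head[2];		/* 0-1: MMU_FAULT_HEAD1/2 magic */
	u32 size;		/* 2: dump size in bytes */
	u32 exc_type;		/* 3: 0 = use IRP, else use NRP */
	u32 exec_ctxt;		/* 4: Task/SWI/HWI/Unknown */
	u32 task_handle;	/* 5 */
	u32 stack_ptr;		/* 6 */
	u32 stack_top;		/* 7 */
	u32 stack_bottom;	/* 8 */
	u32 stack_size;		/* 9 */
	u32 stack_size_in_use;	/* 10 */
	u32 a_regs[32];		/* 11-42: A0..A31 */
	u32 b_regs[32];		/* 43-74: B0..B31 */
	u32 ctrl_regs[12];	/* 75-86: EFR..CSR; IRP = 79, NRP = 80 */
	u32 stack[];		/* 87..: stack words */
};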
+
+/**
+ * dump_dl_modules() - This function dumps the _DLModules list loaded on the DSP side.
+ * @bridge_context: Bridge driver's device context pointer.
+ *
+ */
+void dump_dl_modules(struct bridge_dev_context *bridge_context)
+{
+ struct cod_manager *code_mgr;
+ struct bridge_drv_interface *intf_fxns;
+ struct bridge_dev_context *bridge_ctxt = bridge_context;
+ struct dev_object *dev_object = bridge_ctxt->hdev_obj;
+ struct modules_header modules_hdr;
+ struct dll_module *module_struct = NULL;
+ u32 module_dsp_addr;
+ u32 module_size;
+ u32 module_struct_size = 0;
+ u32 sect_ndx;
+	char *sect_str;
+ int status = 0;
+
+ status = dev_get_intf_fxns(dev_object, &intf_fxns);
+ if (status) {
+ pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
+ goto func_end;
+ }
+
+ status = dev_get_cod_mgr(dev_object, &code_mgr);
+ if (!code_mgr) {
+ pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ /* Lookup the address of the modules_header structure */
+ status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
+ if (status) {
+ pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
+ __func__);
+ goto func_end;
+ }
+
+ pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
+
+ /* Copy the modules_header structure from DSP memory. */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
+ (u32) module_dsp_addr, sizeof(modules_hdr), 0);
+
+ if (status) {
+		pr_debug("%s: Failed to read modules header.\n",
+ __func__);
+ goto func_end;
+ }
+
+ module_dsp_addr = modules_hdr.first_module;
+ module_size = modules_hdr.first_module_size;
+
+ pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
+ module_size);
+
+ pr_err("\nDynamically Loaded Modules:\n"
+ "---------------------------\n");
+
+ /* For each dll_module structure in the list... */
+ while (module_size) {
+ /*
+ * Allocate/re-allocate memory to hold the dll_module
+ * structure. The memory is re-allocated only if the existing
+ * allocation is too small.
+ */
+ if (module_size > module_struct_size) {
+ kfree(module_struct);
+ module_struct = kzalloc(module_size+128, GFP_ATOMIC);
+ module_struct_size = module_size+128;
+ pr_debug("%s: allocated module struct %p %d\n",
+ __func__, module_struct, module_struct_size);
+ if (!module_struct)
+ goto func_end;
+ }
+ /* Copy the dll_module structure from DSP memory */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context,
+ (u8 *)module_struct, module_dsp_addr, module_size, 0);
+
+ if (status) {
+ pr_debug(
+			"%s: Failed to read dll_module struct for 0x%x.\n",
+ __func__, module_dsp_addr);
+ break;
+ }
+
+ /* Update info regarding the _next_ module in the list. */
+ module_dsp_addr = module_struct->next_module;
+ module_size = module_struct->next_module_size;
+
+ pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
+ __func__, module_dsp_addr, module_size,
+ module_struct->num_sects);
+
+ /*
+		 * The section name strings start immediately following
+ * the array of dll_sect structures.
+ */
+ sect_str = (char *) &module_struct->
+ sects[module_struct->num_sects];
+ pr_err("%s\n", sect_str);
+
+ /*
+ * Advance to the first section name string.
+ * Each string follows the one before.
+ */
+ sect_str += strlen(sect_str) + 1;
+
+ /* Access each dll_sect structure and its name string. */
+ for (sect_ndx = 0;
+ sect_ndx < module_struct->num_sects; sect_ndx++) {
+ pr_err(" Section: 0x%x ",
+ module_struct->sects[sect_ndx].sect_load_adr);
+
+ if (((u32) sect_str - (u32) module_struct) <
+ module_struct_size) {
+ pr_err("%s\n", sect_str);
+ /* Each string follows the one before. */
+ sect_str += strlen(sect_str)+1;
+ } else {
+ pr_err("<string error>\n");
+				pr_debug("%s: section name string address "
+ "is invalid %p\n", __func__, sect_str);
+ }
+ }
+ }
+func_end:
+ kfree(module_struct);
+}
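The walk above implies the following DSP-side layout; a sketch with field names taken from the code. The authoritative definitions live in the dynamic loader headers, only the fields the code dereferences are shown, and the first name string appears to be the module name, followed by one string per section:

struct dll_sect_sketch {
	u32 sect_load_adr;	/* section load address */
	/* ... further fields not referenced above ... */
};

struct dll_module_sketch {
	u32 next_module;	/* DSP addr of next module, 0 ends list */
	u32 next_module_size;	/* size of next module struct, 0 ends */
	u32 num_sects;
	struct dll_sect_sketch sects[];	/* num_sects entries, then the
					 * NUL-terminated name strings */
};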
+#endif
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
new file mode 100644
index 000000000000..87712e24dfb1
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -0,0 +1,673 @@
+/*
+ * msg_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge functions for Bridge message module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/list.h>
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- This */
+#include <_msg_sm.h>
+#include <dspbridge/dspmsg.h>
+
+/* ----------------------------------- Function Prototypes */
+static int add_new_msg(struct lst_list *msg_list);
+static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
+static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
+static void free_msg_list(struct lst_list *msg_list);
+
+/*
+ * ======== bridge_msg_create ========
+ * Create an object to manage message queues. Only one of these objects
+ * can exist per device object.
+ */
+int bridge_msg_create(struct msg_mgr **msg_man,
+ struct dev_object *hdev_obj,
+ msg_onexit msg_callback)
+{
+ struct msg_mgr *msg_mgr_obj;
+ struct io_mgr *hio_mgr;
+ int status = 0;
+
+ if (!msg_man || !msg_callback || !hdev_obj) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ dev_get_io_mgr(hdev_obj, &hio_mgr);
+ if (!hio_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ *msg_man = NULL;
+ /* Allocate msg_ctrl manager object */
+ msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
+
+ if (msg_mgr_obj) {
+ msg_mgr_obj->on_exit = msg_callback;
+ msg_mgr_obj->hio_mgr = hio_mgr;
+ /* List of MSG_QUEUEs */
+ msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ /* Queues of message frames for messages to the DSP. Message
+ * frames will only be added to the free queue when a
+ * msg_queue object is created. */
+ msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ if (msg_mgr_obj->queue_list == NULL ||
+ msg_mgr_obj->msg_free_list == NULL ||
+ msg_mgr_obj->msg_used_list == NULL) {
+ status = -ENOMEM;
+ } else {
+ INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
+ INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
+ INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
+ spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
+ }
+
+ /* Create an event to be used by bridge_msg_put() in waiting
+ * for an available free frame from the message manager. */
+ msg_mgr_obj->sync_event =
+ kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (!msg_mgr_obj->sync_event)
+ status = -ENOMEM;
+ else
+ sync_init_event(msg_mgr_obj->sync_event);
+
+ if (!status)
+ *msg_man = msg_mgr_obj;
+ else
+ delete_msg_mgr(msg_mgr_obj);
+
+ } else {
+ status = -ENOMEM;
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_create_queue ========
+ * Create a msg_queue for sending/receiving messages to/from a node
+ * on the DSP.
+ */
+int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
+ struct msg_queue **msgq,
+ u32 msgq_id, u32 max_msgs, void *arg)
+{
+ u32 i;
+ u32 num_allocated = 0;
+ struct msg_queue *msg_q;
+ int status = 0;
+
+ if (!hmsg_mgr || msgq == NULL || !hmsg_mgr->msg_free_list) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ *msgq = NULL;
+ /* Allocate msg_queue object */
+ msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
+ if (!msg_q) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ lst_init_elem((struct list_head *)msg_q);
+ msg_q->max_msgs = max_msgs;
+ msg_q->hmsg_mgr = hmsg_mgr;
+ msg_q->arg = arg; /* Node handle */
+ msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
+ /* Queues of Message frames for messages from the DSP */
+ msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+ msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+ if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
+ status = -ENOMEM;
+ else {
+ INIT_LIST_HEAD(&msg_q->msg_free_list->head);
+ INIT_LIST_HEAD(&msg_q->msg_used_list->head);
+ }
+
+ /* Create event that will be signalled when a message from
+ * the DSP is available. */
+ if (!status) {
+ msg_q->sync_event = kzalloc(sizeof(struct sync_object),
+ GFP_KERNEL);
+ if (msg_q->sync_event)
+ sync_init_event(msg_q->sync_event);
+ else
+ status = -ENOMEM;
+ }
+
+ /* Create a notification list for message ready notification. */
+ if (!status) {
+ msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+ GFP_KERNEL);
+ if (msg_q->ntfy_obj)
+ ntfy_init(msg_q->ntfy_obj);
+ else
+ status = -ENOMEM;
+ }
+
+ /* Create events that will be used to synchronize cleanup
+ * when the object is deleted. sync_done will be set to
+ * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
+ * will be set by the unblocked thread to signal that it
+ * is unblocked and will no longer reference the object. */
+ if (!status) {
+ msg_q->sync_done = kzalloc(sizeof(struct sync_object),
+ GFP_KERNEL);
+ if (msg_q->sync_done)
+ sync_init_event(msg_q->sync_done);
+ else
+ status = -ENOMEM;
+ }
+
+ if (!status) {
+ msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
+ GFP_KERNEL);
+ if (msg_q->sync_done_ack)
+ sync_init_event(msg_q->sync_done_ack);
+ else
+ status = -ENOMEM;
+ }
+
+ if (!status) {
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Initialize message frames and put in appropriate queues */
+ for (i = 0; i < max_msgs && !status; i++) {
+ status = add_new_msg(hmsg_mgr->msg_free_list);
+ if (!status) {
+ num_allocated++;
+ status = add_new_msg(msg_q->msg_free_list);
+ }
+ }
+ if (status) {
+ /* Stay inside CS to prevent others from taking any
+ * of the newly allocated message frames. */
+ delete_msg_queue(msg_q, num_allocated);
+ } else {
+ lst_put_tail(hmsg_mgr->queue_list,
+ (struct list_head *)msg_q);
+ *msgq = msg_q;
+ /* Signal that free frames are now available */
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_set_event(hmsg_mgr->sync_event);
+
+ }
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ } else {
+ delete_msg_queue(msg_q, 0);
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_delete ========
+ * Delete a msg_ctrl manager allocated in bridge_msg_create().
+ */
+void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
+{
+ if (hmsg_mgr)
+ delete_msg_mgr(hmsg_mgr);
+}
+
+/*
+ * ======== bridge_msg_delete_queue ========
+ * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
+ */
+void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
+{
+ struct msg_mgr *hmsg_mgr;
+ u32 io_msg_pend;
+
+ if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
+ goto func_end;
+
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ msg_queue_obj->done = true;
+ /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
+ io_msg_pend = msg_queue_obj->io_msg_pend;
+ while (io_msg_pend) {
+ /* Unblock thread */
+ sync_set_event(msg_queue_obj->sync_done);
+ /* Wait for acknowledgement */
+ sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
+ io_msg_pend = msg_queue_obj->io_msg_pend;
+ }
+ /* Remove message queue from hmsg_mgr->queue_list */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ lst_remove_elem(hmsg_mgr->queue_list,
+ (struct list_head *)msg_queue_obj);
+ /* Free the message queue object */
+ delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
+ if (!hmsg_mgr->msg_free_list)
+ goto func_cont;
+ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_reset_event(hmsg_mgr->sync_event);
+func_cont:
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+func_end:
+ return;
+}
+
+/*
+ * ======== bridge_msg_get ========
+ * Get a message from a msg_ctrl queue.
+ */
+int bridge_msg_get(struct msg_queue *msg_queue_obj,
+ struct dsp_msg *pmsg, u32 utimeout)
+{
+ struct msg_frame *msg_frame_obj;
+ struct msg_mgr *hmsg_mgr;
+ bool got_msg = false;
+ struct sync_object *syncs[2];
+ u32 index;
+ int status = 0;
+
+ if (!msg_queue_obj || pmsg == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ if (!msg_queue_obj->msg_used_list) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* If a message is already there, get it */
+ if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
+ msg_frame_obj = (struct msg_frame *)
+ lst_get_head(msg_queue_obj->msg_used_list);
+ if (msg_frame_obj != NULL) {
+ *pmsg = msg_frame_obj->msg_data.msg;
+ lst_put_tail(msg_queue_obj->msg_free_list,
+ (struct list_head *)msg_frame_obj);
+ if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
+ sync_reset_event(msg_queue_obj->sync_event);
+
+ got_msg = true;
+ }
+ } else {
+ if (msg_queue_obj->done)
+ status = -EPERM;
+ else
+ msg_queue_obj->io_msg_pend++;
+
+ }
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (!status && !got_msg) {
+ /* Wait til message is available, timeout, or done. We don't
+ * have to schedule the DPC, since the DSP will send messages
+ * when they are available. */
+ syncs[0] = msg_queue_obj->sync_event;
+ syncs[1] = msg_queue_obj->sync_done;
+ status = sync_wait_on_multiple_events(syncs, 2, utimeout,
+ &index);
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (msg_queue_obj->done) {
+ msg_queue_obj->io_msg_pend--;
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Signal that we're not going to access msg_queue_obj
+ * anymore, so it can be deleted. */
+ (void)sync_set_event(msg_queue_obj->sync_done_ack);
+ status = -EPERM;
+ } else {
+ if (!status) {
+ DBC_ASSERT(!LST_IS_EMPTY
+ (msg_queue_obj->msg_used_list));
+ /* Get msg from used list */
+ msg_frame_obj = (struct msg_frame *)
+ lst_get_head(msg_queue_obj->msg_used_list);
+ /* Copy message into pmsg and put frame on the
+ * free list */
+ if (msg_frame_obj != NULL) {
+ *pmsg = msg_frame_obj->msg_data.msg;
+ lst_put_tail
+ (msg_queue_obj->msg_free_list,
+ (struct list_head *)
+ msg_frame_obj);
+ }
+ }
+ msg_queue_obj->io_msg_pend--;
+ /* Reset the event if there are still queued messages */
+ if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
+ sync_set_event(msg_queue_obj->sync_event);
+
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_put ========
+ * Put a message onto a msg_ctrl queue.
+ */
+int bridge_msg_put(struct msg_queue *msg_queue_obj,
+ const struct dsp_msg *pmsg, u32 utimeout)
+{
+ struct msg_frame *msg_frame_obj;
+ struct msg_mgr *hmsg_mgr;
+ bool put_msg = false;
+ struct sync_object *syncs[2];
+ u32 index;
+ int status = 0;
+
+ if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ if (!hmsg_mgr->msg_free_list) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+
+ /* If a message frame is available, use it */
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+ msg_frame_obj =
+ (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
+ if (msg_frame_obj != NULL) {
+ msg_frame_obj->msg_data.msg = *pmsg;
+ msg_frame_obj->msg_data.msgq_id =
+ msg_queue_obj->msgq_id;
+ lst_put_tail(hmsg_mgr->msg_used_list,
+ (struct list_head *)msg_frame_obj);
+ hmsg_mgr->msgs_pending++;
+ put_msg = true;
+ }
+ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_reset_event(hmsg_mgr->sync_event);
+
+ /* Release critical section before scheduling DPC */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Schedule a DPC, to do the actual data transfer: */
+ iosm_schedule(hmsg_mgr->hio_mgr);
+ } else {
+ if (msg_queue_obj->done)
+ status = -EPERM;
+ else
+ msg_queue_obj->io_msg_pend++;
+
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ }
+ if (!status && !put_msg) {
+ /* Wait til a free message frame is available, timeout,
+ * or done */
+ syncs[0] = hmsg_mgr->sync_event;
+ syncs[1] = msg_queue_obj->sync_done;
+ status = sync_wait_on_multiple_events(syncs, 2, utimeout,
+ &index);
+ if (status)
+ goto func_end;
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (msg_queue_obj->done) {
+ msg_queue_obj->io_msg_pend--;
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Signal that we're not going to access msg_queue_obj
+ * anymore, so it can be deleted. */
+ (void)sync_set_event(msg_queue_obj->sync_done_ack);
+ status = -EPERM;
+ } else {
+ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+ status = -EFAULT;
+ goto func_cont;
+ }
+ /* Get msg from free list */
+ msg_frame_obj = (struct msg_frame *)
+ lst_get_head(hmsg_mgr->msg_free_list);
+ /*
+ * Copy message into pmsg and put frame on the
+ * used list.
+ */
+ if (msg_frame_obj) {
+ msg_frame_obj->msg_data.msg = *pmsg;
+ msg_frame_obj->msg_data.msgq_id =
+ msg_queue_obj->msgq_id;
+ lst_put_tail(hmsg_mgr->msg_used_list,
+ (struct list_head *)msg_frame_obj);
+ hmsg_mgr->msgs_pending++;
+ /*
+ * Schedule a DPC, to do the actual
+ * data transfer.
+ */
+ iosm_schedule(hmsg_mgr->hio_mgr);
+ }
+
+ msg_queue_obj->io_msg_pend--;
+ /* Reset event if there are still frames available */
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_set_event(hmsg_mgr->sync_event);
+func_cont:
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ }
+ }
+func_end:
+ return status;
+}
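A hedged usage sketch of the two calls above from a client's perspective. The dsp_msg field names are assumed from the message structure used elsewhere in the driver, and timeouts are in milliseconds as consumed by sync_wait_on_multiple_events():

struct dsp_msg msg = { .dw_cmd = 0x01, .dw_arg1 = 0, .dw_arg2 = 0 };
int ret;

/* Send a message toward the DSP node, waiting up to 100 ms for a frame */
ret = bridge_msg_put(msg_queue_obj, &msg, 100);
if (ret == -ETIME)
	pr_debug("no free message frame within timeout\n");

/* Block up to 100 ms for a message coming back from the DSP */
ret = bridge_msg_get(msg_queue_obj, &msg, 100);
if (!ret)
	pr_debug("got msg: cmd 0x%x arg1 0x%x\n", msg.dw_cmd, msg.dw_arg1);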
+
+/*
+ * ======== bridge_msg_register_notify ========
+ */
+int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
+ u32 event_mask, u32 notify_type,
+ struct dsp_notification *hnotification)
+{
+ int status = 0;
+
+ if (!msg_queue_obj || !hnotification) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
+ status = -EPERM;
+ goto func_end;
+ }
+
+ if (notify_type != DSP_SIGNALEVENT) {
+ status = -EBADR;
+ goto func_end;
+ }
+
+ if (event_mask)
+ status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
+ event_mask, notify_type);
+ else
+ status = ntfy_unregister(msg_queue_obj->ntfy_obj,
+ hnotification);
+
+ if (status == -EINVAL) {
+ /* Not registered. Ok, since we couldn't have known. Node
+ * notifications are split between node state change handled
+ * by NODE, and message ready handled by msg_ctrl. */
+ status = 0;
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_set_queue_id ========
+ */
+void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
+{
+ /*
+ * A message queue must be created when a node is allocated,
+ * so that node_register_notify() can be called before the node
+ * is created. Since we don't know the node environment until the
+ * node is created, we need this function to set msg_queue_obj->msgq_id
+ * to the node environment, after the node is created.
+ */
+ if (msg_queue_obj)
+ msg_queue_obj->msgq_id = msgq_id;
+}
+
+/*
+ * ======== add_new_msg ========
+ * Must be called in message manager critical section.
+ */
+static int add_new_msg(struct lst_list *msg_list)
+{
+ struct msg_frame *pmsg;
+ int status = 0;
+
+ pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
+ if (pmsg != NULL) {
+ lst_init_elem((struct list_head *)pmsg);
+ lst_put_tail(msg_list, (struct list_head *)pmsg);
+ } else {
+ status = -ENOMEM;
+ }
+
+ return status;
+}
+
+/*
+ * ======== delete_msg_mgr ========
+ */
+static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
+{
+ if (!hmsg_mgr)
+ goto func_end;
+
+ if (hmsg_mgr->queue_list) {
+ if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
+ kfree(hmsg_mgr->queue_list);
+ hmsg_mgr->queue_list = NULL;
+ }
+ }
+
+ if (hmsg_mgr->msg_free_list) {
+ free_msg_list(hmsg_mgr->msg_free_list);
+ hmsg_mgr->msg_free_list = NULL;
+ }
+
+ if (hmsg_mgr->msg_used_list) {
+ free_msg_list(hmsg_mgr->msg_used_list);
+ hmsg_mgr->msg_used_list = NULL;
+ }
+
+ kfree(hmsg_mgr->sync_event);
+
+ kfree(hmsg_mgr);
+func_end:
+ return;
+}
+
+/*
+ * ======== delete_msg_queue ========
+ */
+static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
+{
+ struct msg_mgr *hmsg_mgr;
+ struct msg_frame *pmsg;
+ u32 i;
+
+ if (!msg_queue_obj ||
+ !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
+ goto func_end;
+
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+
+ /* Pull off num_to_dsp message frames from Msg manager and free */
+ for (i = 0; i < num_to_dsp; i++) {
+
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+ pmsg = (struct msg_frame *)
+ lst_get_head(hmsg_mgr->msg_free_list);
+ kfree(pmsg);
+ } else {
+ /* Cannot free all of the message frames */
+ break;
+ }
+ }
+
+ if (msg_queue_obj->msg_free_list) {
+ free_msg_list(msg_queue_obj->msg_free_list);
+ msg_queue_obj->msg_free_list = NULL;
+ }
+
+ if (msg_queue_obj->msg_used_list) {
+ free_msg_list(msg_queue_obj->msg_used_list);
+ msg_queue_obj->msg_used_list = NULL;
+ }
+
+ if (msg_queue_obj->ntfy_obj) {
+ ntfy_delete(msg_queue_obj->ntfy_obj);
+ kfree(msg_queue_obj->ntfy_obj);
+ }
+
+ kfree(msg_queue_obj->sync_event);
+ kfree(msg_queue_obj->sync_done);
+ kfree(msg_queue_obj->sync_done_ack);
+
+ kfree(msg_queue_obj);
+func_end:
+ return;
+
+}
+
+/*
+ * ======== free_msg_list ========
+ */
+static void free_msg_list(struct lst_list *msg_list)
+{
+ struct msg_frame *pmsg;
+
+ if (!msg_list)
+ goto func_end;
+
+ while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL)
+ kfree(pmsg);
+
+ DBC_ASSERT(LST_IS_EMPTY(msg_list));
+
+ kfree(msg_list);
+func_end:
+ return;
+}
diff --git a/drivers/staging/tidspbridge/core/sync.c b/drivers/staging/tidspbridge/core/sync.c
new file mode 100644
index 000000000000..995986a9d03b
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/sync.c
@@ -0,0 +1,121 @@
+/*
+ * sync.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Synchronization services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/sync.h>
+#include <dspbridge/ntfy.h>
+
+DEFINE_SPINLOCK(sync_lock);
+
+/**
+ * sync_set_event() - set or signal the specified event
+ * @event: Event to be set.
+ *
+ * Set @event; if a thread is waiting for the event it will be woken
+ * up. This function wakes only one thread.
+ */
+
+void sync_set_event(struct sync_object *event)
+{
+ spin_lock_bh(&sync_lock);
+ complete(&event->comp);
+ if (event->multi_comp)
+ complete(event->multi_comp);
+ spin_unlock_bh(&sync_lock);
+}
+
+/**
+ * sync_wait_on_multiple_events() - wait for any of multiple events to be set.
+ * @events: Array of events to wait on.
+ * @count: Number of elements in the array.
+ * @timeout: Timeout, in milliseconds, for waiting on the events.
+ * @index: Index of the event that was set.
+ *
+ * This function waits until any of the array elements is set or until
+ * the timeout expires. On success it returns 0 and @index holds the
+ * index of the element that was set; on timeout it returns -ETIME; if
+ * interrupted by a signal it returns -EPERM.
+ */
+
+int sync_wait_on_multiple_events(struct sync_object **events,
+ unsigned count, unsigned timeout,
+ unsigned *index)
+{
+ unsigned i;
+ int status = -EPERM;
+ struct completion m_comp;
+
+ init_completion(&m_comp);
+
+ if (SYNC_INFINITE == timeout)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+
+ spin_lock_bh(&sync_lock);
+ for (i = 0; i < count; i++) {
+ if (completion_done(&events[i]->comp)) {
+ INIT_COMPLETION(events[i]->comp);
+ *index = i;
+ spin_unlock_bh(&sync_lock);
+ status = 0;
+ goto func_end;
+ }
+ }
+
+ for (i = 0; i < count; i++)
+ events[i]->multi_comp = &m_comp;
+
+ spin_unlock_bh(&sync_lock);
+
+ if (!wait_for_completion_interruptible_timeout(&m_comp,
+ msecs_to_jiffies(timeout)))
+ status = -ETIME;
+
+ spin_lock_bh(&sync_lock);
+ for (i = 0; i < count; i++) {
+ if (completion_done(&events[i]->comp)) {
+ INIT_COMPLETION(events[i]->comp);
+ *index = i;
+ status = 0;
+ }
+ events[i]->multi_comp = NULL;
+ }
+ spin_unlock_bh(&sync_lock);
+func_end:
+ return status;
+}
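A usage sketch for the routine above, waiting on two events; the event sources are illustrative (here, the sync objects of a message queue as in msg_sm.c):

struct sync_object *events[2] = { queue->sync_event, queue->sync_done };
unsigned int index;
int ret;

ret = sync_wait_on_multiple_events(events, 2, 100 /* ms */, &index);
if (!ret)
	pr_debug("event %u was signalled\n", index);
else if (ret == -ETIME)
	pr_debug("timed out waiting for events\n");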
+
+/**
+ * dsp_notifier_event() - callback function to notify events
+ * @this: Pointer to the notifier_block embedded in a struct ntfy_event.
+ * @event: event to be notified.
+ * @data: Currently not used.
+ *
+ */
+int dsp_notifier_event(struct notifier_block *this, unsigned long event,
+ void *data)
+{
+ struct ntfy_event *ne = container_of(this, struct ntfy_event,
+ noti_block);
+ if (ne->event & event)
+ sync_set_event(&ne->sync_obj);
+ return NOTIFY_OK;
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
new file mode 100644
index 000000000000..f22bc12bc0d3
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -0,0 +1,948 @@
+/*
+ * tiomap.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Processor Manager Driver for TI OMAP3430 EVM.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <plat/dsp.h>
+
+#include <linux/types.h>
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <plat/control.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/drv.h>
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Link Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspchnl.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dspio.h>
+#include <dspbridge/dspmsg.h>
+#include <dspbridge/pwr.h>
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/dspapi.h>
+#include <dspbridge/wdt.h>
+
+/* ----------------------------------- Local */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include "tiomap_io.h"
+
+/* Offset in shared mem to write to in order to synchronize start with DSP */
+#define SHMSYNCOFFSET 4 /* GPP byte offset */
+
+#define BUFFERSIZE 1024
+
+#define TIHELEN_ACKTIMEOUT 10000
+
+#define MMU_SECTION_ADDR_MASK 0xFFF00000
+#define MMU_SSECTION_ADDR_MASK 0xFF000000
+#define MMU_LARGE_PAGE_MASK 0xFFFF0000
+#define MMU_SMALL_PAGE_MASK 0xFFFFF000
+#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
+#define PAGES_II_LVL_TABLE 512
+
+/* Forward Declarations: */
+static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
+static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff,
+ u32 dsp_addr, u32 ul_num_bytes,
+ u32 mem_type);
+static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr);
+static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
+ int *board_state);
+static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
+static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff,
+ u32 dsp_addr, u32 ul_num_bytes,
+ u32 mem_type);
+static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
+ u32 brd_state);
+static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_dest_addr, u32 dsp_src_addr,
+ u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+static int bridge_dev_create(struct bridge_dev_context
+ **dev_cntxt,
+ struct dev_object *hdev_obj,
+ struct cfg_hostres *config_param);
+static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
+ u32 dw_cmd, void *pargs);
+static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
+
+/*
+ * This Bridge driver's function interface table.
+ */
+static struct bridge_drv_interface drv_interface_fxns = {
+ /* Bridge API ver. for which this bridge driver is built. */
+ BRD_API_MAJOR_VERSION,
+ BRD_API_MINOR_VERSION,
+ bridge_dev_create,
+ bridge_dev_destroy,
+ bridge_dev_ctrl,
+ bridge_brd_monitor,
+ bridge_brd_start,
+ bridge_brd_stop,
+ bridge_brd_status,
+ bridge_brd_read,
+ bridge_brd_write,
+ bridge_brd_set_state,
+ bridge_brd_mem_copy,
+ bridge_brd_mem_write,
+ /* The following CHNL functions are provided by chnl_io.lib: */
+ bridge_chnl_create,
+ bridge_chnl_destroy,
+ bridge_chnl_open,
+ bridge_chnl_close,
+ bridge_chnl_add_io_req,
+ bridge_chnl_get_ioc,
+ bridge_chnl_cancel_io,
+ bridge_chnl_flush_io,
+ bridge_chnl_get_info,
+ bridge_chnl_get_mgr_info,
+ bridge_chnl_idle,
+ bridge_chnl_register_notify,
+ /* The following IO functions are provided by chnl_io.lib: */
+ bridge_io_create,
+ bridge_io_destroy,
+ bridge_io_on_loaded,
+ bridge_io_get_proc_load,
+ /* The following msg_ctrl functions are provided by chnl_io.lib: */
+ bridge_msg_create,
+ bridge_msg_create_queue,
+ bridge_msg_delete,
+ bridge_msg_delete_queue,
+ bridge_msg_get,
+ bridge_msg_put,
+ bridge_msg_register_notify,
+ bridge_msg_set_queue_id,
+};
+
+/*
+ * ======== bridge_drv_entry ========
+ * purpose:
+ * Bridge Driver entry point.
+ */
+void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
+ const char *driver_file_name)
+{
+
+ DBC_REQUIRE(driver_file_name != NULL);
+
+ io_sm_init(); /* Initialization of io_sm module */
+
+ if (strcmp(driver_file_name, "UMA") == 0)
+ *drv_intf = &drv_interface_fxns;
+ else
+ dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
+
+}
+
+/*
+ * ======== bridge_brd_monitor ========
+ * purpose:
+ *      bridge_brd_monitor puts the DSP into a loadable state,
+ *      i.e. the application can load and start the device.
+ *
+ * Preconditions:
+ * Device in 'OFF' state.
+ */
+static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
+{
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 temp;
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+ if (!(temp & 0x02)) {
+ /* IVA2 is not in ON state */
+ /* Read and set PM_PWSTCTRL_IVA2 to ON */
+ (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
+ PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
+ /* Set the SW supervised state transition */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+
+ /* Wait until the state has moved to ON */
+ while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_INTRANSITION_MASK)
+ ;
+ /* Disable Automatic transition */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+ }
+
+ dsp_clk_enable(DSP_CLK_IVA2);
+
+ /* set the device state to IDLE */
+ dev_context->dw_brd_state = BRD_IDLE;
+
+ return 0;
+}
+
+/*
+ * ======== bridge_brd_read ========
+ * purpose:
+ * Reads buffers for DSP memory.
+ */
+static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 offset;
+ u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
+
+ if (dsp_addr < dev_context->dw_dsp_start_add) {
+ status = -EPERM;
+ return status;
+ }
+ /* change here to account for the 3 bands of the DSP internal memory */
+ if ((dsp_addr - dev_context->dw_dsp_start_add) <
+ dev_context->dw_internal_size) {
+ offset = dsp_addr - dev_context->dw_dsp_start_add;
+ } else {
+ status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
+ ul_num_bytes, mem_type);
+ return status;
+ }
+ /* copy the data from DSP memory, */
+ memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
+ return status;
+}
+
+/*
+ * ======== bridge_brd_set_state ========
+ * purpose:
+ * This routine updates the Board status.
+ */
+static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
+ u32 brd_state)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+
+ dev_context->dw_brd_state = brd_state;
+ return status;
+}
+
+/*
+ * ======== bridge_brd_start ========
+ * purpose:
+ * Initializes DSP MMU and Starts DSP.
+ *
+ * Preconditions:
+ * a) DSP domain is 'ACTIVE'.
+ * b) DSP_RST1 is asserted.
+ *      c) DSP_RST2 is released.
+ */
+static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ struct iommu *mmu = NULL;
+ struct shm_segs *sm_sg;
+ int l4_i = 0, tlb_i = 0;
+ u32 sg0_da = 0, sg1_da = 0;
+ struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
+ u32 dw_sync_addr = 0;
+ u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
+ u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
+ u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
+ /* Offset of shm_base_virt from tlb_base_virt */
+ u32 ul_shm_offset_virt;
+ struct cfg_hostres *resources = NULL;
+ u32 temp;
+ u32 ul_dsp_clk_rate;
+ u32 ul_dsp_clk_addr;
+ u32 ul_bios_gp_timer;
+ u32 clk_cmd;
+ struct io_mgr *hio_mgr;
+ u32 ul_load_monitor_timer;
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ /* The device context contains all the mmu setup info from when the
+ * last dsp base image was loaded. The first entry is always
+ * SHMMEM base. */
+ /* Get SHM_BEG - convert to byte address */
+ (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
+ &ul_shm_base_virt);
+ ul_shm_base_virt *= DSPWORDSIZE;
+ DBC_ASSERT(ul_shm_base_virt != 0);
+ /* DSP Virtual address */
+ ul_tlb_base_virt = dev_context->sh_s.seg0_da;
+ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+ ul_shm_offset_virt =
+ ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
+ /* Kernel logical address */
+ ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;
+
+ DBC_ASSERT(ul_shm_base != 0);
+ /* 2nd wd is used as sync field */
+ dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
+ /* Write a signature into the shm base + offset; this will
+ * get cleared when the DSP program starts. */
+ if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
+ pr_err("%s: Illegal SM base\n", __func__);
+ status = -EPERM;
+ } else
+ __raw_writel(0xffffffff, dw_sync_addr);
+
+ if (!status) {
+ resources = dev_context->resources;
+ if (!resources)
+ status = -EPERM;
+
+		/* Assert RST1, i.e. reset only the DSP megacell */
+ if (!status) {
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
+ OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
+ OMAP2_RM_RSTCTRL);
+ /* Mask address with 1K for compatibility */
+ __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
+ OMAP343X_CTRL_REGADDR(
+ OMAP343X_CONTROL_IVA2_BOOTADDR));
+ /*
+ * Set bootmode to self loop if dsp_debug flag is true
+ */
+ __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
+ OMAP343X_CTRL_REGADDR(
+ OMAP343X_CONTROL_IVA2_BOOTMOD));
+ }
+ }
+
+ if (!status) {
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ mmu = dev_context->dsp_mmu;
+ if (mmu)
+ dsp_mmu_exit(mmu);
+ mmu = dsp_mmu_init();
+ if (IS_ERR(mmu)) {
+ dev_err(bridge, "dsp_mmu_init failed!\n");
+ dev_context->dsp_mmu = NULL;
+			status = PTR_ERR(mmu);
+ }
+ }
+ if (!status) {
+ dev_context->dsp_mmu = mmu;
+ sm_sg = &dev_context->sh_s;
+ sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
+ sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+ if (IS_ERR_VALUE(sg0_da)) {
+ status = (int)sg0_da;
+ sg0_da = 0;
+ }
+ }
+ if (!status) {
+ sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
+ sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+ if (IS_ERR_VALUE(sg1_da)) {
+ status = (int)sg1_da;
+ sg1_da = 0;
+ }
+ }
+ if (!status) {
+ u32 da;
+ for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
+ if (!tlb[tlb_i].ul_gpp_pa)
+ continue;
+
+ dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
+ " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
+ tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);
+
+ da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
+ tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
+ IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+ if (IS_ERR_VALUE(da)) {
+ status = (int)da;
+ break;
+ }
+ }
+ }
+ if (!status) {
+ u32 da;
+ l4_i = 0;
+ while (l4_peripheral_table[l4_i].phys_addr) {
+ da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
+ dsp_virt_addr, l4_peripheral_table[l4_i].
+ phys_addr, PAGE_SIZE,
+ IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+ if (IS_ERR_VALUE(da)) {
+ status = (int)da;
+ break;
+ }
+ l4_i++;
+ }
+ }
+
+ /* Lock the above TLB entries and get the BIOS and load monitor timer
+ * information */
+ if (!status) {
+ /* Enable the BIOS clock */
+ (void)dev_get_symbol(dev_context->hdev_obj,
+ BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
+ (void)dev_get_symbol(dev_context->hdev_obj,
+ BRIDGEINIT_LOADMON_GPTIMER,
+ &ul_load_monitor_timer);
+
+ if (ul_load_monitor_timer != 0xFFFF) {
+ clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
+ ul_load_monitor_timer;
+ dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
+ } else {
+ dev_dbg(bridge, "Not able to get the symbol for Load "
+ "Monitor Timer\n");
+ }
+
+ if (ul_bios_gp_timer != 0xFFFF) {
+ clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
+ ul_bios_gp_timer;
+ dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
+ } else {
+ dev_dbg(bridge,
+ "Not able to get the symbol for BIOS Timer\n");
+ }
+
+ /* Set the DSP clock rate */
+ (void)dev_get_symbol(dev_context->hdev_obj,
+ "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
+ /*Set Autoidle Mode for IVA2 PLL */
+ (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
+ OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+
+ if ((unsigned int *)ul_dsp_clk_addr != NULL) {
+ /* Get the clock rate */
+ ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
+			dev_dbg(bridge, "%s: DSP clock rate (kHz): 0x%x\n",
+ __func__, ul_dsp_clk_rate);
+ (void)bridge_brd_write(dev_context,
+ (u8 *) &ul_dsp_clk_rate,
+ ul_dsp_clk_addr, sizeof(u32), 0);
+ }
+ /*
+ * Enable Mailbox events and also drain any pending
+ * stale messages.
+ */
+ dev_context->mbox = omap_mbox_get("dsp");
+ if (IS_ERR(dev_context->mbox)) {
+ dev_context->mbox = NULL;
+ pr_err("%s: Failed to get dsp mailbox handle\n",
+ __func__);
+ status = -EPERM;
+ }
+
+ }
+ if (!status) {
+ dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
+
+/*PM_IVA2GRPSEL_PER = 0xC0;*/
+ temp = readl(resources->dw_per_pm_base + 0xA8);
+ temp = (temp & 0xFFFFFF30) | 0xC0;
+ writel(temp, resources->dw_per_pm_base + 0xA8);
+
+/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
+ temp = readl(resources->dw_per_pm_base + 0xA4);
+ temp = (temp & 0xFFFFFF3F);
+ writel(temp, resources->dw_per_pm_base + 0xA4);
+/*CM_SLEEPDEP_PER |= 0x04; */
+ temp = readl(resources->dw_per_base + 0x44);
+ temp = (temp & 0xFFFFFFFB) | 0x04;
+ writel(temp, resources->dw_per_base + 0x44);
+
+/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+
+ /* Let DSP go */
+ dev_dbg(bridge, "%s Unreset\n", __func__);
+ /* release the RST1, DSP starts executing now .. */
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+ dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
+ dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
+ if (dsp_debug)
+ while (__raw_readw(dw_sync_addr))
+				;
+
+ /* Wait for DSP to clear word in shared memory */
+ /* Read the Location */
+ if (!wait_for_start(dev_context, dw_sync_addr))
+ status = -ETIMEDOUT;
+
+ /* Start wdt */
+ dsp_wdt_sm_set((void *)ul_shm_base);
+ dsp_wdt_enable(true);
+
+ status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ if (hio_mgr) {
+ io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
+ /* Write the synchronization bit to indicate the
+ * completion of OPP table update to DSP
+ */
+ __raw_writel(0XCAFECAFE, dw_sync_addr);
+
+ /* update board state */
+ dev_context->dw_brd_state = BRD_RUNNING;
+ return 0;
+ } else {
+ dev_context->dw_brd_state = BRD_UNKNOWN;
+ }
+ }
+
+ while (tlb_i--) {
+ if (!tlb[tlb_i].ul_gpp_pa)
+ continue;
+ iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
+ }
+ while (l4_i--)
+ iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
+ if (sg0_da)
+ iommu_kunmap(mmu, sg0_da);
+ if (sg1_da)
+ iommu_kunmap(mmu, sg1_da);
+ return status;
+}
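wait_for_start() is only declared earlier in this file; a plausible sketch of it under the protocol described above (the DSP clears the 0xffffffff signature written at dw_sync_addr once c_int00 runs), assuming a bounded poll using the TIHELEN_ACKTIMEOUT retry count:

bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
{
	u16 timeout = TIHELEN_ACKTIMEOUT;

	/* Poll the sync word until the DSP clears it or we give up */
	while (__raw_readw(dw_sync_addr) && --timeout)
		udelay(10);

	return timeout != 0;
}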
+
+/*
+ * ======== bridge_brd_stop ========
+ * purpose:
+ * Puts DSP in self loop.
+ *
+ * Preconditions :
+ * a) None
+ */
+static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 dsp_pwr_state;
+ int i;
+ struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ if (dev_context->dw_brd_state == BRD_STOPPED)
+ return status;
+
+	/* As per the TRM, it is advised to first drive the IVA2 to 'Standby'
+	 * mode before turning off the clocks. This is to ensure that there
+	 * are no pending L3 or other transactions from IVA2. */
+ dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+ if (dsp_pwr_state != PWRDM_POWER_OFF) {
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
+ mdelay(10);
+
+ /* IVA2 is not in OFF state */
+ /* Set PM_PWSTCTRL_IVA2 to OFF */
+ (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
+ PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
+ /* Set the SW supervised state transition for Sleep */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+ }
+ udelay(10);
+ /* Release the Ext Base virtual Address as the next DSP Program
+ * may have a different load address */
+ if (dev_context->dw_dsp_ext_base_addr)
+ dev_context->dw_dsp_ext_base_addr = 0;
+
+ dev_context->dw_brd_state = BRD_STOPPED; /* update board state */
+
+ dsp_wdt_enable(false);
+
+ /* Reset DSP */
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
+ OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+ /* Disable the mailbox interrupts */
+ if (dev_context->mbox) {
+ omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
+ omap_mbox_put(dev_context->mbox);
+ dev_context->mbox = NULL;
+ }
+ if (dev_context->dsp_mmu) {
+		pr_debug("%s: unmapping DSP MMU entries\n", __func__);
+ for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
+ if (!tlb[i].ul_gpp_pa)
+ continue;
+ iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
+ }
+ i = 0;
+ while (l4_peripheral_table[i].phys_addr) {
+ iommu_kunmap(dev_context->dsp_mmu,
+ l4_peripheral_table[i].dsp_virt_addr);
+ i++;
+ }
+ iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
+ iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
+ dsp_mmu_exit(dev_context->dsp_mmu);
+ dev_context->dsp_mmu = NULL;
+ }
+ /* Reset IVA IOMMU*/
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+ OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+ dsp_clock_disable_all(dev_context->dsp_per_clks);
+ dsp_clk_disable(DSP_CLK_IVA2);
+
+ return status;
+}
+
+/*
+ * ======== bridge_brd_status ========
+ * Returns the board status.
+ */
+static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
+ int *board_state)
+{
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ *board_state = dev_context->dw_brd_state;
+ return 0;
+}
+
+/*
+ * ======== bridge_brd_write ========
+ * Copies the buffers to DSP internal or external memory.
+ */
+static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+
+ if (dsp_addr < dev_context->dw_dsp_start_add) {
+ status = -EPERM;
+ return status;
+ }
+ if ((dsp_addr - dev_context->dw_dsp_start_add) <
+ dev_context->dw_internal_size) {
+ status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
+ ul_num_bytes, mem_type);
+ } else {
+ status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
+ ul_num_bytes, mem_type, false);
+ }
+
+ return status;
+}
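+
+/*
+ * Usage sketch for the routing above (never compiled): an address within
+ * dw_internal_size of dw_dsp_start_add goes through write_dsp_data(),
+ * anything beyond it through write_ext_dsp_data(); addresses below
+ * dw_dsp_start_add are rejected. The buffer and mem_type value are
+ * illustrative only.
+ */
+#if 0
+	u32 word = 0x12345678;
+
+	/* lands in DSP internal memory */
+	bridge_brd_write(dev_ctxt, (u8 *)&word,
+			 dev_ctxt->dw_dsp_start_add, sizeof(word), 0);
+#endif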
+
+/*
+ * ======== bridge_dev_create ========
+ * Creates a driver object. Puts DSP in self loop.
+ */
+static int bridge_dev_create(struct bridge_dev_context
+ **dev_cntxt,
+ struct dev_object *hdev_obj,
+ struct cfg_hostres *config_param)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = NULL;
+ s32 entry_ndx;
+ struct cfg_hostres *resources = config_param;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ /* Allocate and initialize a data structure to contain the bridge driver
+ * state, which becomes the context for later calls into this driver */
+ dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
+ if (!dev_context) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
+ dev_context->dw_self_loop = (u32) NULL;
+ dev_context->dsp_per_clks = 0;
+ dev_context->dw_internal_size = OMAP_DSP_SIZE;
+ /* Clear dev context MMU table entries.
+ * These get set on bridge_io_on_loaded() call after program loaded. */
+ for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
+ dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
+ dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
+ }
+	dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS(
+			(void *) config_param->dw_mem_base[3],
+			config_param->dw_mem_length[3]);
+ if (!dev_context->dw_dsp_base_addr)
+ status = -EPERM;
+
+ if (!status) {
+ dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+ dev_context->hdev_obj = hdev_obj;
+ /* Store current board state. */
+ dev_context->dw_brd_state = BRD_UNKNOWN;
+ dev_context->resources = resources;
+ dsp_clk_enable(DSP_CLK_IVA2);
+ bridge_brd_stop(dev_context);
+ /* Return ptr to our device state to the DSP API for storage */
+ *dev_cntxt = dev_context;
+ } else {
+ kfree(dev_context);
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_dev_ctrl ========
+ * Receives device specific commands.
+ */
+static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
+ u32 dw_cmd, void *pargs)
+{
+ int status = 0;
+ struct bridge_ioctl_extproc *pa_ext_proc =
+ (struct bridge_ioctl_extproc *)pargs;
+ s32 ndx;
+
+ switch (dw_cmd) {
+ case BRDIOCTL_CHNLREAD:
+ break;
+ case BRDIOCTL_CHNLWRITE:
+ break;
+ case BRDIOCTL_SETMMUCONFIG:
+ /* store away dsp-mmu setup values for later use */
+ for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
+ dev_context->atlb_entry[ndx] = *pa_ext_proc;
+ break;
+ case BRDIOCTL_DEEPSLEEP:
+ case BRDIOCTL_EMERGENCYSLEEP:
+		/* Currently only DSP Idle is supported; needs updating for
+		 * later releases */
+ status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
+ break;
+ case BRDIOCTL_WAKEUP:
+ status = wake_dsp(dev_context, pargs);
+ break;
+	case BRDIOCTL_CLK_CTRL:
+		/* Looking for baseport fix for clocks */
+		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
+		break;
+ case BRDIOCTL_PWR_HIBERNATE:
+ status = handle_hibernation_from_dsp(dev_context);
+ break;
+ case BRDIOCTL_PRESCALE_NOTIFY:
+ status = pre_scale_dsp(dev_context, pargs);
+ break;
+ case BRDIOCTL_POSTSCALE_NOTIFY:
+ status = post_scale_dsp(dev_context, pargs);
+ break;
+ case BRDIOCTL_CONSTRAINT_REQUEST:
+ status = handle_constraints_set(dev_context, pargs);
+ break;
+ default:
+ status = -EPERM;
+ break;
+ }
+ return status;
+}
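+
+/*
+ * Usage sketch (never compiled): handing the DSP MMU TLB layout to the
+ * driver before a base image is loaded. The zero-filled table is
+ * illustrative only.
+ */
+#if 0
+	struct bridge_ioctl_extproc tlb[BRDIOCTL_NUMOFMMUTLB] = { { 0 } };
+
+	bridge_dev_ctrl(dev_context, BRDIOCTL_SETMMUCONFIG, tlb);
+#endif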
+
+/*
+ * ======== bridge_dev_destroy ========
+ * Destroys the driver object.
+ */
+static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
+ dev_ctxt;
+ struct cfg_hostres *host_res;
+ u32 shm_size;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+	/* This should never happen */
+ if (!dev_ctxt)
+ return -EFAULT;
+
+ /* first put the device to stop state */
+ bridge_brd_stop(dev_context);
+
+ if (dev_context->resources) {
+ host_res = dev_context->resources;
+ shm_size = drv_datap->shm_size;
+ if (shm_size >= 0x10000) {
+ if ((host_res->dw_mem_base[1]) &&
+ (host_res->dw_mem_phys[1])) {
+				mem_free_phys_mem(
+					(void *) host_res->dw_mem_base[1],
+					host_res->dw_mem_phys[1], shm_size);
+ }
+ } else {
+ dev_dbg(bridge, "%s: Error getting shm size "
+ "from registry: %x. Not calling "
+ "mem_free_phys_mem\n", __func__,
+ status);
+ }
+ host_res->dw_mem_base[1] = 0;
+ host_res->dw_mem_phys[1] = 0;
+
+ if (host_res->dw_mem_base[0])
+ iounmap((void *)host_res->dw_mem_base[0]);
+ if (host_res->dw_mem_base[2])
+ iounmap((void *)host_res->dw_mem_base[2]);
+ if (host_res->dw_mem_base[3])
+ iounmap((void *)host_res->dw_mem_base[3]);
+ if (host_res->dw_mem_base[4])
+ iounmap((void *)host_res->dw_mem_base[4]);
+ if (host_res->dw_per_base)
+ iounmap(host_res->dw_per_base);
+ if (host_res->dw_per_pm_base)
+ iounmap((void *)host_res->dw_per_pm_base);
+ if (host_res->dw_core_pm_base)
+ iounmap((void *)host_res->dw_core_pm_base);
+ if (host_res->dw_sys_ctrl_base)
+ iounmap(host_res->dw_sys_ctrl_base);
+
+ host_res->dw_mem_base[0] = (u32) NULL;
+ host_res->dw_mem_base[2] = (u32) NULL;
+ host_res->dw_mem_base[3] = (u32) NULL;
+ host_res->dw_mem_base[4] = (u32) NULL;
+ host_res->dw_sys_ctrl_base = NULL;
+
+ kfree(host_res);
+ }
+
+ /* Free the driver's device context: */
+ kfree(drv_datap->base_img);
+ kfree(drv_datap);
+ dev_set_drvdata(bridge, NULL);
+ kfree((void *)dev_ctxt);
+ return status;
+}
+
+static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_dest_addr, u32 dsp_src_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ u32 src_addr = dsp_src_addr;
+ u32 dest_addr = dsp_dest_addr;
+ u32 copy_bytes = 0;
+ u32 total_bytes = ul_num_bytes;
+ u8 host_buf[BUFFERSIZE];
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ while (total_bytes > 0 && !status) {
+ copy_bytes =
+ total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
+ /* Read from External memory */
+ status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
+ copy_bytes, mem_type);
+ if (!status) {
+ if (dest_addr < (dev_context->dw_dsp_start_add +
+ dev_context->dw_internal_size)) {
+ /* Write to Internal memory */
+ status = write_dsp_data(dev_ctxt, host_buf,
+ dest_addr, copy_bytes,
+ mem_type);
+ } else {
+ /* Write to External memory */
+ status =
+ write_ext_dsp_data(dev_ctxt, host_buf,
+ dest_addr, copy_bytes,
+ mem_type, false);
+ }
+ }
+ total_bytes -= copy_bytes;
+ src_addr += copy_bytes;
+ dest_addr += copy_bytes;
+ }
+ return status;
+}
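+
+/*
+ * Note on the copy loop above: data moves through a BUFFERSIZE-byte
+ * bounce buffer on the stack, so copying N bytes issues
+ * ceil(N / BUFFERSIZE) read/write pairs.
+ */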
+
+/* Unlike bridge_brd_write, this memory write does not halt the DSP */
+static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 ul_remain_bytes = 0;
+ u32 ul_bytes = 0;
+ ul_remain_bytes = ul_num_bytes;
+ while (ul_remain_bytes > 0 && !status) {
+ ul_bytes =
+ ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
+ if (dsp_addr < (dev_context->dw_dsp_start_add +
+ dev_context->dw_internal_size)) {
+ status =
+ write_dsp_data(dev_ctxt, host_buff, dsp_addr,
+ ul_bytes, mem_type);
+ } else {
+ status = write_ext_dsp_data(dev_ctxt, host_buff,
+ dsp_addr, ul_bytes,
+ mem_type, true);
+ }
+ ul_remain_bytes -= ul_bytes;
+ dsp_addr += ul_bytes;
+ host_buff = host_buff + ul_bytes;
+ }
+ return status;
+}
+
+/*
+ * ======== wait_for_start ========
+ * Wait for the signal from DSP that it has started, or time out.
+ * The sync word is polled every 10 us, for at most TIHELEN_ACKTIMEOUT
+ * iterations.
+ */
+bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
+{
+ u16 timeout = TIHELEN_ACKTIMEOUT;
+
+ /* Wait for response from board */
+ while (__raw_readw(dw_sync_addr) && --timeout)
+ udelay(10);
+
+ /* If timed out: return false */
+ if (!timeout) {
+ pr_err("%s: Timed out waiting DSP to Start\n", __func__);
+ return false;
+ }
+ return true;
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
new file mode 100644
index 000000000000..b57a9fd5e757
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -0,0 +1,550 @@
+/*
+ * tiomap_pwr.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation of DSP wake/sleep routines.
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+#include <plat/dsp.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/brddefs.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/iodefs.h>
+
+#include <dspbridge/pwr_sh.h>
+
+/* ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/wdt.h>
+
+/* ----------------------------------- specific to this file */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include <mach-omap2/prm-regbits-34xx.h>
+#include <mach-omap2/cm-regbits-34xx.h>
+
+#define PWRSTST_TIMEOUT 200
+
+/*
+ * ======== handle_constraints_set ========
+ * Sets new DSP constraint
+ */
+int handle_constraints_set(struct bridge_dev_context *dev_context,
+ void *pargs)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 *constraint_val;
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ constraint_val = (u32 *) (pargs);
+ /* Read the target value requested by DSP */
+ dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__,
+ (u32) *(constraint_val + 1));
+
+ /* Set the new opp value */
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1));
+#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
+ return 0;
+}
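+
+/*
+ * pargs above is assumed to mirror the layout used by pre_scale_dsp()
+ * and post_scale_dsp(): two u32 words, [0] the voltage domain and [1]
+ * the requested OPP level; only word [1] is consumed here.
+ */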
+
+/*
+ * ======== handle_hibernation_from_dsp ========
+ * Handle Hibernation requested from DSP
+ */
+int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
+{
+ int status = 0;
+#ifdef CONFIG_PM
+ u16 timeout = PWRSTST_TIMEOUT / 10;
+ u32 pwr_state;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 opplevel;
+ struct io_mgr *hio_mgr;
+#endif
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+ /* Wait for DSP to move into OFF state */
+ while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
+ if (msleep_interruptible(10)) {
+ pr_err("Waiting for DSP OFF mode interrupted\n");
+ return -EPERM;
+ }
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+ OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ }
+ if (timeout == 0) {
+ pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
+ status = -ETIMEDOUT;
+ return status;
+	} else {
+		/* Save mailbox settings */
+ omap_mbox_save_ctx(dev_context->mbox);
+
+ /* Turn off DSP Peripheral clocks and DSP Load monitor timer */
+ status = dsp_clock_disable_all(dev_context->dsp_per_clks);
+
+ /* Disable wdt on hibernation. */
+ dsp_wdt_enable(false);
+
+ if (!status) {
+			/* Update the Bridge driver state */
+ dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ status =
+ dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ if (!hio_mgr) {
+ status = DSP_EHANDLE;
+ return status;
+ }
+ io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);
+
+ /*
+ * Set the OPP to low level before moving to OFF
+ * mode
+ */
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) (VDD1_OPP1);
+ status = 0;
+#endif /* CONFIG_TIDSPBRIDGE_DVFS */
+ }
+ }
+#endif
+ return status;
+}
+
+/*
+ * ======== sleep_dsp ========
+ * Put DSP in low power consuming state.
+ */
+int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
+ void *pargs)
+{
+ int status = 0;
+#ifdef CONFIG_PM
+#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
+ struct deh_mgr *hdeh_mgr;
+#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
+ u16 timeout = PWRSTST_TIMEOUT / 10;
+ u32 pwr_state, target_pwr_state;
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ /* Check if sleep code is valid */
+ if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
+ return -EINVAL;
+
+ switch (dev_context->dw_brd_state) {
+ case BRD_RUNNING:
+ omap_mbox_save_ctx(dev_context->mbox);
+ if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
+ dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n",
+ __func__);
+ target_pwr_state = PWRDM_POWER_OFF;
+ } else {
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION);
+ target_pwr_state = PWRDM_POWER_RET;
+ }
+ break;
+ case BRD_RETENTION:
+ omap_mbox_save_ctx(dev_context->mbox);
+ if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
+ target_pwr_state = PWRDM_POWER_OFF;
+ } else
+ return 0;
+ break;
+ case BRD_HIBERNATION:
+ case BRD_DSP_HIBERNATION:
+ /* Already in Hibernation, so just return */
+ dev_dbg(bridge, "PM: %s - DSP already in hibernation\n",
+ __func__);
+ return 0;
+ case BRD_STOPPED:
+ dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__);
+ return 0;
+ default:
+ dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__);
+ return -EPERM;
+ }
+
+ /* Get the PRCM DSP power domain status */
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+
+ /* Wait for DSP to move into target power state */
+ while ((pwr_state != target_pwr_state) && --timeout) {
+ if (msleep_interruptible(10)) {
+ pr_err("Waiting for DSP to Suspend interrupted\n");
+ return -EPERM;
+ }
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+ OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ }
+
+ if (!timeout) {
+ pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
+ __func__, pwr_state);
+#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
+ dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr);
+ bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
+#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
+ return -ETIMEDOUT;
+ } else {
+		/* Update the Bridge driver state */
+ if (dsp_test_sleepstate == PWRDM_POWER_OFF)
+ dev_context->dw_brd_state = BRD_HIBERNATION;
+ else
+ dev_context->dw_brd_state = BRD_RETENTION;
+
+ /* Disable wdt on hibernation. */
+ dsp_wdt_enable(false);
+
+ /* Turn off DSP Peripheral clocks */
+ status = dsp_clock_disable_all(dev_context->dsp_per_clks);
+ if (status)
+ return status;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ else if (target_pwr_state == PWRDM_POWER_OFF) {
+ /*
+ * Set the OPP to low level before moving to OFF mode
+ */
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) (VDD1_OPP1);
+ }
+#endif /* CONFIG_TIDSPBRIDGE_DVFS */
+ }
+#endif /* CONFIG_PM */
+ return status;
+}
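+
+/*
+ * State transitions driven by sleep_dsp() above, summarized from the
+ * switch: BRD_RUNNING and BRD_RETENTION move to BRD_HIBERNATION when
+ * dsp_test_sleepstate selects PWRDM_POWER_OFF; otherwise BRD_RUNNING
+ * moves to BRD_RETENTION. BRD_STOPPED and the hibernation states are
+ * left as they are.
+ */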
+
+/*
+ * ======== wake_dsp ========
+ * Wake up DSP from sleep.
+ */
+int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
+{
+ int status = 0;
+#ifdef CONFIG_PM
+
+ /* Check the board state, if it is not 'SLEEP' then return */
+ if (dev_context->dw_brd_state == BRD_RUNNING ||
+ dev_context->dw_brd_state == BRD_STOPPED) {
+		/* The board is already running or stopped; there is nothing
+		 * to wake up, so return */
+ return 0;
+ }
+
+ /* Send a wakeup message to DSP */
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
+
+ /* Set the device state to RUNNIG */
+ dev_context->dw_brd_state = BRD_RUNNING;
+#endif /* CONFIG_PM */
+ return status;
+}
+
+/*
+ * ======== dsp_peripheral_clk_ctrl ========
+ * Enable/Disable the DSP peripheral clocks as needed.
+ */
+int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
+ void *pargs)
+{
+ u32 ext_clk = 0;
+ u32 ext_clk_id = 0;
+ u32 ext_clk_cmd = 0;
+ u32 clk_id_index = MBX_PM_MAX_RESOURCES;
+ u32 tmp_index;
+ u32 dsp_per_clks_before;
+ int status = 0;
+
+ dsp_per_clks_before = dev_context->dsp_per_clks;
+
+ ext_clk = (u32) *((u32 *) pargs);
+ ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK;
+
+ /* process the power message -- TODO, keep it in a separate function */
+ for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) {
+ if (ext_clk_id == bpwr_clkid[tmp_index]) {
+ clk_id_index = tmp_index;
+ break;
+ }
+ }
+	/* TODO -- an assert may be too hard a restriction here; maybe we
+	 * should just return with failure when the CLK ID does not match */
+ /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
+ if (clk_id_index == MBX_PM_MAX_RESOURCES) {
+		/* return with a more meaningful error code */
+ return -EPERM;
+ }
+ ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK;
+ switch (ext_clk_cmd) {
+ case BPWR_DISABLE_CLOCK:
+ status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
+ dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
+ false);
+ if (!status) {
+ (dev_context->dsp_per_clks) &=
+ (~((u32) (1 << bpwr_clks[clk_id_index].clk)));
+ }
+ break;
+ case BPWR_ENABLE_CLOCK:
+ status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
+ dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
+ if (!status)
+ (dev_context->dsp_per_clks) |=
+ (1 << bpwr_clks[clk_id_index].clk);
+ break;
+ default:
+ dev_dbg(bridge, "%s: Unsupported CMD\n", __func__);
+ /* unsupported cmd */
+ /* TODO -- provide support for AUTOIDLE Enable/Disable
+ * commands */
+ }
+ return status;
+}
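+
+/*
+ * Layout of the message word consumed above, for reference: the low
+ * bits carry the clock id (ext_clk & MBX_PM_CLK_IDMASK), matched
+ * against bpwr_clkid[], and the command is extracted with
+ * (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK. A
+ * hypothetical "enable GPT5" message would thus be composed as
+ * (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | <GPT5 clock id>,
+ * assuming the id matches the bpwr_clkid[] entry for GPT5.
+ */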
+
+/*
+ * ======== pre_scale_dsp ========
+ * Sends prescale notification to DSP.
+ */
+int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 level;
+ u32 voltage_domain;
+
+ voltage_domain = *((u32 *) pargs);
+ level = *((u32 *) pargs + 1);
+
+ dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
+ __func__, voltage_domain, level);
+ if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
+ (dev_context->dw_brd_state == BRD_RETENTION) ||
+ (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+ dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n");
+ return 0;
+	} else if (dev_context->dw_brd_state == BRD_RUNNING) {
+		/* Send a pre-notification to DSP */
+ dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
+ sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
+ return 0;
+ } else {
+ return -EPERM;
+ }
+#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
+ return 0;
+}
+
+/*
+ * ======== post_scale_dsp ========
+ * Sends postscale notification to DSP.
+ */
+int post_scale_dsp(struct bridge_dev_context *dev_context,
+ void *pargs)
+{
+ int status = 0;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 level;
+ u32 voltage_domain;
+ struct io_mgr *hio_mgr;
+
+ status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ if (!hio_mgr)
+ return -EFAULT;
+
+ voltage_domain = *((u32 *) pargs);
+ level = *((u32 *) pargs + 1);
+ dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
+ __func__, voltage_domain, level);
+ if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
+ (dev_context->dw_brd_state == BRD_RETENTION) ||
+ (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+ /* Update the OPP value in shared memory */
+ io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
+ dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
+ __func__);
+	} else if (dev_context->dw_brd_state == BRD_RUNNING) {
+ /* Update the OPP value in shared memory */
+ io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
+ /* Send a post notification to DSP */
+ sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
+ dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
+ "to DSP\n", __func__);
+ } else {
+ status = -EPERM;
+ }
+#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
+ return status;
+}
+
+void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
+{
+ struct cfg_hostres *resources;
+ int status = 0;
+ u32 iva2_grpsel;
+ u32 mpu_grpsel;
+ struct dev_object *hdev_object = NULL;
+ struct bridge_dev_context *bridge_context = NULL;
+
+ hdev_object = (struct dev_object *)drv_get_first_dev_object();
+ if (!hdev_object)
+ return;
+
+ status = dev_get_bridge_context(hdev_object, &bridge_context);
+ if (!bridge_context)
+ return;
+
+ resources = bridge_context->resources;
+ if (!resources)
+ return;
+
+ switch (clock_id) {
+ case BPWR_GP_TIMER5:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_GP_TIMER6:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_GP_TIMER7:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_GP_TIMER8:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP1:
+ iva2_grpsel = readl(resources->dw_core_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_core_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_core_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_core_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP2:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP3:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP4:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP5:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ }
+}
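+
+/*
+ * A table-driven form of the switch above would collapse the nine
+ * near-identical cases; sketch only (never compiled), assuming the
+ * 0xA8/0xA4 grpsel register offsets and the masks stay exactly as in
+ * the cases above. Only MCBSP1 lives off the CORE PM base.
+ */
+#if 0
+static void dsp_clk_wakeup_event_ctrl_tbl(u32 clock_id, bool enable,
+					  struct cfg_hostres *resources)
+{
+	static const struct {
+		u32 clk_id;
+		u32 mask;
+		bool core_pm;	/* true: CORE PM base; false: PER PM base */
+	} map[] = {
+		{ BPWR_GP_TIMER5, OMAP3430_GRPSEL_GPT5_MASK, false },
+		{ BPWR_GP_TIMER6, OMAP3430_GRPSEL_GPT6_MASK, false },
+		{ BPWR_GP_TIMER7, OMAP3430_GRPSEL_GPT7_MASK, false },
+		{ BPWR_GP_TIMER8, OMAP3430_GRPSEL_GPT8_MASK, false },
+		{ BPWR_MCBSP1, OMAP3430_GRPSEL_MCBSP1_MASK, true },
+		{ BPWR_MCBSP2, OMAP3430_GRPSEL_MCBSP2_MASK, false },
+		{ BPWR_MCBSP3, OMAP3430_GRPSEL_MCBSP3_MASK, false },
+		{ BPWR_MCBSP4, OMAP3430_GRPSEL_MCBSP4_MASK, false },
+		{ BPWR_MCBSP5, OMAP3430_GRPSEL_MCBSP5_MASK, false },
+	};
+	u32 base, iva2_grpsel, mpu_grpsel;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(map); i++) {
+		if (map[i].clk_id != clock_id)
+			continue;
+		base = map[i].core_pm ? resources->dw_core_pm_base :
+					resources->dw_per_pm_base;
+		iva2_grpsel = readl(base + 0xA8);
+		mpu_grpsel = readl(base + 0xA4);
+		if (enable) {
+			iva2_grpsel |= map[i].mask;
+			mpu_grpsel &= ~map[i].mask;
+		} else {
+			mpu_grpsel |= map[i].mask;
+			iva2_grpsel &= ~map[i].mask;
+		}
+		writel(iva2_grpsel, base + 0xA8);
+		writel(mpu_grpsel, base + 0xA4);
+		return;
+	}
+}
+#endif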
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
new file mode 100644
index 000000000000..66dbf02549e4
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -0,0 +1,455 @@
+/*
+ * tiomap_io.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation for the io read/write routines.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <plat/dsp.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/drv.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/wdt.h>
+
+/* ----------------------------------- specific to this file */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include "tiomap_io.h"
+
+static u32 ul_ext_base;
+static u32 ul_ext_end;
+
+static u32 shm0_end;
+static u32 ul_dyn_ext_base;
+static u32 ul_trace_sec_beg;
+static u32 ul_trace_sec_end;
+static u32 ul_shm_base_virt;
+
+bool symbols_reloaded = true;
+
+/*
+ * ======== read_ext_dsp_data ========
+ * Copies DSP external memory buffers to the host side buffers.
+ */
+int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 offset;
+ u32 ul_tlb_base_virt = 0;
+ u32 ul_shm_offset_virt = 0;
+ u32 dw_ext_prog_virt_mem;
+ u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+ bool trace_read = false;
+
+ if (!ul_shm_base_virt) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ SHMBASENAME, &ul_shm_base_virt);
+ }
+ DBC_ASSERT(ul_shm_base_virt != 0);
+
+ /* Check if it is a read of Trace section */
+ if (!status && !ul_trace_sec_beg) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_BEG, &ul_trace_sec_beg);
+ }
+ DBC_ASSERT(ul_trace_sec_beg != 0);
+
+ if (!status && !ul_trace_sec_end) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_END, &ul_trace_sec_end);
+ }
+ DBC_ASSERT(ul_trace_sec_end != 0);
+
+ if (!status) {
+ if ((dsp_addr <= ul_trace_sec_end) &&
+ (dsp_addr >= ul_trace_sec_beg))
+ trace_read = true;
+ }
+
+ /* If reading from TRACE, force remap/unmap */
+ if (trace_read && dw_base_addr) {
+ dw_base_addr = 0;
+ dev_context->dw_dsp_ext_base_addr = 0;
+ }
+
+ if (!dw_base_addr) {
+ /* Initialize ul_ext_base and ul_ext_end */
+ ul_ext_base = 0;
+ ul_ext_end = 0;
+
+ /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
+ if (!status && !ul_dyn_ext_base) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ DYNEXTBASE, &ul_dyn_ext_base);
+ }
+ DBC_ASSERT(ul_dyn_ext_base != 0);
+
+ if (!status) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ EXTBASE, &ul_ext_base);
+ }
+ DBC_ASSERT(ul_ext_base != 0);
+
+ if (!status) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ EXTEND, &ul_ext_end);
+ }
+ DBC_ASSERT(ul_ext_end != 0);
+
+ /* Trace buffer is right after the shm SEG0,
+ * so set the base address to SHMBASE */
+ if (trace_read) {
+ ul_ext_base = ul_shm_base_virt;
+ ul_ext_end = ul_trace_sec_end;
+ }
+
+ DBC_ASSERT(ul_ext_end != 0);
+ DBC_ASSERT(ul_ext_end > ul_ext_base);
+
+ if (ul_ext_end < ul_ext_base)
+ status = -EPERM;
+
+ if (!status) {
+ ul_tlb_base_virt =
+ dev_context->sh_s.seg0_da * DSPWORDSIZE;
+ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+ dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va;
+
+ if (!trace_read) {
+ ul_shm_offset_virt =
+ ul_shm_base_virt - ul_tlb_base_virt;
+ ul_shm_offset_virt +=
+ PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
+ 1, PAGE_SIZE * 16);
+ dw_ext_prog_virt_mem -= ul_shm_offset_virt;
+ dw_ext_prog_virt_mem +=
+ (ul_ext_base - ul_dyn_ext_base);
+ dev_context->dw_dsp_ext_base_addr =
+ dw_ext_prog_virt_mem;
+
+ /*
+ * This dw_dsp_ext_base_addr will get cleared
+ * only when the board is stopped.
+ */
+ if (!dev_context->dw_dsp_ext_base_addr)
+ status = -EPERM;
+ }
+
+ dw_base_addr = dw_ext_prog_virt_mem;
+ }
+ }
+
+ if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
+ status = -EPERM;
+
+ offset = dsp_addr - ul_ext_base;
+
+ if (!status)
+ memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes);
+
+ return status;
+}
+
+/*
+ * ======== write_dsp_data ========
+ * purpose:
+ * Copies buffers to the DSP internal/external memory.
+ */
+int write_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr, u32 ul_num_bytes,
+ u32 mem_type)
+{
+ u32 offset;
+ u32 dw_base_addr = dev_context->dw_dsp_base_addr;
+ struct cfg_hostres *resources = dev_context->resources;
+ int status = 0;
+ u32 base1, base2, base3;
+ base1 = OMAP_DSP_MEM1_SIZE;
+ base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
+ base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;
+
+ if (!resources)
+ return -EPERM;
+
+ offset = dsp_addr - dev_context->dw_dsp_start_add;
+ if (offset < base1) {
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
+ resources->dw_mem_length[2]);
+ } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3],
+ resources->dw_mem_length[3]);
+ offset = offset - base2;
+ } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
+ offset < base3 + OMAP_DSP_MEM3_SIZE) {
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4],
+ resources->dw_mem_length[4]);
+ offset = offset - base3;
+ } else {
+ return -EPERM;
+ }
+ if (ul_num_bytes)
+ memcpy((u8 *) (dw_base_addr + offset), host_buff, ul_num_bytes);
+ else
+ *((u32 *) host_buff) = dw_base_addr + offset;
+
+ return status;
+}
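+
+/*
+ * The windowing above, summarized: offset is relative to
+ * dw_dsp_start_add and selects one of three discontiguous RAM windows:
+ *
+ *   [0, MEM1_SIZE)                         -> dw_mem_base[2]
+ *   (MEM1_SIZE, base2 + MEM2_SIZE)         -> dw_mem_base[3], offset -= base2
+ *   [base2 + MEM2_SIZE, base3 + MEM3_SIZE) -> dw_mem_base[4], offset -= base3
+ *
+ * where base2/base3 are the MEM2/MEM3 bases relative to MEM1. Note that
+ * offset == MEM1_SIZE exactly falls through to the -EPERM case.
+ */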
+
+/*
+ * ======== write_ext_dsp_data ========
+ * purpose:
+ * Copies buffers to the external memory.
+ *
+ */
+int write_ext_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type,
+ bool dynamic_load)
+{
+ u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+ u32 dw_offset = 0;
+ u8 temp_byte1, temp_byte2;
+ u8 remain_byte[4];
+ s32 i;
+ int ret = 0;
+ u32 dw_ext_prog_virt_mem;
+ u32 ul_tlb_base_virt = 0;
+ u32 ul_shm_offset_virt = 0;
+ struct cfg_hostres *host_res = dev_context->resources;
+ bool trace_load = false;
+ temp_byte1 = 0x0;
+ temp_byte2 = 0x0;
+
+ if (symbols_reloaded) {
+ /* Check if it is a load to Trace section */
+ ret = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_BEG, &ul_trace_sec_beg);
+ if (!ret)
+ ret = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_END,
+ &ul_trace_sec_end);
+ }
+ if (!ret) {
+ if ((dsp_addr <= ul_trace_sec_end) &&
+ (dsp_addr >= ul_trace_sec_beg))
+ trace_load = true;
+ }
+
+ /* If dynamic, force remap/unmap */
+ if ((dynamic_load || trace_load) && dw_base_addr) {
+ dw_base_addr = 0;
+ MEM_UNMAP_LINEAR_ADDRESS((void *)
+ dev_context->dw_dsp_ext_base_addr);
+ dev_context->dw_dsp_ext_base_addr = 0x0;
+ }
+ if (!dw_base_addr) {
+ if (symbols_reloaded)
+ /* Get SHM_BEG EXT_BEG and EXT_END. */
+ ret = dev_get_symbol(dev_context->hdev_obj,
+ SHMBASENAME, &ul_shm_base_virt);
+ DBC_ASSERT(ul_shm_base_virt != 0);
+		if (dynamic_load) {
+			if (!ret && symbols_reloaded)
+				ret = dev_get_symbol(dev_context->hdev_obj,
+						DYNEXTBASE, &ul_ext_base);
+			DBC_ASSERT(ul_ext_base != 0);
+			if (!ret && symbols_reloaded) {
+				/* DR OMAPS00013235 : DLModules array may be
+				 * in EXTMEM. It is expected that DYNEXTMEM and
+				 * EXTMEM are contiguous, so checking for the
+				 * upper bound at EXTEND should be Ok. */
+				ret = dev_get_symbol(dev_context->hdev_obj,
+						EXTEND, &ul_ext_end);
+			}
+		} else if (symbols_reloaded) {
+			if (!ret)
+				ret = dev_get_symbol(dev_context->hdev_obj,
+						EXTBASE, &ul_ext_base);
+			DBC_ASSERT(ul_ext_base != 0);
+			if (!ret)
+				ret = dev_get_symbol(dev_context->hdev_obj,
+						EXTEND, &ul_ext_end);
+		}
+		/* Trace buffer is right after the shm SEG0, so set the
+		 * base address to SHMBASE */
+ if (trace_load)
+ ul_ext_base = ul_shm_base_virt;
+
+ DBC_ASSERT(ul_ext_end != 0);
+ DBC_ASSERT(ul_ext_end > ul_ext_base);
+ if (ul_ext_end < ul_ext_base)
+ ret = -EPERM;
+
+ if (!ret) {
+ ul_tlb_base_virt = dev_context->sh_s.seg0_da *
+ DSPWORDSIZE;
+
+ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+
+			if (symbols_reloaded) {
+				ret = dev_get_symbol(dev_context->hdev_obj,
+						DSP_TRACESEC_END, &shm0_end);
+				if (!ret)
+					ret = dev_get_symbol(
+						dev_context->hdev_obj,
+						DYNEXTBASE, &ul_dyn_ext_base);
+			}
+ ul_shm_offset_virt =
+ ul_shm_base_virt - ul_tlb_base_virt;
+ if (trace_load) {
+ dw_ext_prog_virt_mem =
+ dev_context->sh_s.seg0_va;
+ } else {
+ dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
+ dw_ext_prog_virt_mem +=
+ (ul_ext_base - ul_dyn_ext_base);
+ }
+
+ dev_context->dw_dsp_ext_base_addr =
+ (u32) MEM_LINEAR_ADDRESS((void *)
+ dw_ext_prog_virt_mem,
+ ul_ext_end - ul_ext_base);
+ dw_base_addr += dev_context->dw_dsp_ext_base_addr;
+ /* This dw_dsp_ext_base_addr will get cleared only when
+ * the board is stopped. */
+ if (!dev_context->dw_dsp_ext_base_addr)
+ ret = -EPERM;
+ }
+ }
+ if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
+ ret = -EPERM;
+
+ if (!ret) {
+ for (i = 0; i < 4; i++)
+ remain_byte[i] = 0x0;
+
+ dw_offset = dsp_addr - ul_ext_base;
+ /* Also make sure the dsp_addr is < ul_ext_end */
+ if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
+ ret = -EPERM;
+ }
+ if (!ret) {
+ if (ul_num_bytes)
+ memcpy((u8 *) dw_base_addr + dw_offset, host_buff,
+ ul_num_bytes);
+ else
+ *((u32 *) host_buff) = dw_base_addr + dw_offset;
+ }
+ /* Unmap here to force remap for other Ext loads */
+ if ((dynamic_load || trace_load) && dev_context->dw_dsp_ext_base_addr) {
+ MEM_UNMAP_LINEAR_ADDRESS((void *)
+ dev_context->dw_dsp_ext_base_addr);
+ dev_context->dw_dsp_ext_base_addr = 0x0;
+ }
+ symbols_reloaded = false;
+ return ret;
+}
+
+int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 opplevel = 0;
+#endif
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+ struct cfg_hostres *resources = dev_context->resources;
+ int status = 0;
+
+ if (!dev_context->mbox)
+ return 0;
+
+ if (!resources)
+ return -EPERM;
+
+ if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+ dev_context->dw_brd_state == BRD_HIBERNATION) {
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ if (pdata->dsp_get_opp)
+ opplevel = (*pdata->dsp_get_opp) ();
+ if (opplevel == VDD1_OPP1) {
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) (VDD1_OPP2);
+ }
+#endif
+ /* Restart the peripheral clocks */
+ dsp_clock_enable_all(dev_context->dsp_per_clks);
+ dsp_wdt_enable(true);
+
+ /*
+ * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
+ * in CM_AUTOIDLE_PLL_IVA2 register
+ */
+ (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
+ OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+
+ /*
+ * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to
+ * 0.75 MHz - 1.0 MHz
+ * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
+ */
+ (*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
+ OMAP3430_EN_IVA2_DPLL_MASK,
+ 0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
+ 0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
+ OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
+
+ /* Restore mailbox settings */
+ omap_mbox_restore_ctx(dev_context->mbox);
+
+ /* Access MMU SYS CONFIG register to generate a short wakeup */
+ iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG);
+
+ dev_context->dw_brd_state = BRD_RUNNING;
+ } else if (dev_context->dw_brd_state == BRD_RETENTION) {
+ /* Restart the peripheral clocks */
+ dsp_clock_enable_all(dev_context->dsp_per_clks);
+ }
+
+ status = omap_mbox_msg_send(dev_context->mbox, mb_val);
+
+ if (status) {
+ pr_err("omap_mbox_msg_send Fail and status = %d\n", status);
+ status = -EPERM;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.h b/drivers/staging/tidspbridge/core/tiomap_io.h
new file mode 100644
index 000000000000..a3f19c7b79f3
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap_io.h
@@ -0,0 +1,104 @@
+/*
+ * tiomap_io.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions, types and function prototypes for the io (r/w external mem).
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_IO_
+#define _TIOMAP_IO_
+
+/*
+ * Symbol that defines beginning of shared memory.
+ * For OMAP (Helen) this is the DSP Virtual base address of SDRAM.
+ * This will be used to program DSP MMU to map DSP Virt to GPP phys.
+ * (see dspMmuTlbEntry()).
+ */
+#define SHMBASENAME "SHM_BEG"
+#define EXTBASE "EXT_BEG"
+#define EXTEND "_EXT_END"
+#define DYNEXTBASE "_DYNEXT_BEG"
+#define DYNEXTEND "_DYNEXT_END"
+#define IVAEXTMEMBASE "_IVAEXTMEM_BEG"
+#define IVAEXTMEMEND "_IVAEXTMEM_END"
+
+#define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG"
+#define DSP_TRACESEC_END "_BRIDGE_TRACE_END"
+
+#define SYS_PUTCBEG "_SYS_PUTCBEG"
+#define SYS_PUTCEND "_SYS_PUTCEND"
+#define BRIDGE_SYS_PUTC_CURRENT "_BRIDGE_SYS_PUTC_current"
+
+#define WORDSWAP_ENABLE 0x3 /* Enable word swap */
+
+/*
+ * ======== read_ext_dsp_data ========
+ * Reads from DSP external memory. The external memory for the DSP is
+ * configured by the combination of the DSP MMU and the shm memory
+ * manager in the CDB.
+ */
+extern int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+
+/*
+ * ======== write_dsp_data ========
+ */
+extern int write_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+
+/*
+ * ======== write_ext_dsp_data ========
+ * Writes to the DSP external memory for an external program.
+ * The ext mem for the program is configured by the combination of the
+ * DSP MMU and the shm memory manager in the CDB.
+ */
+extern int write_ext_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type,
+ bool dynamic_load);
+
+/*
+ * ======== write_ext32_bit_dsp_data ========
+ * Writes 32 bit data to the external memory
+ */
+extern inline void write_ext32_bit_dsp_data(const
+ struct bridge_dev_context *dev_context,
+ u32 dsp_addr, u32 val)
+{
+	if (dev_context->tc_word_swap_on)
+		val = ((val << 16) & 0xFFFF0000) | ((val >> 16) & 0x0000FFFF);
+	*(u32 *) dsp_addr = val;
+}
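+
+/*
+ * Worked example: with tc_word_swap_on set, writing 0x11223344 stores
+ * 0x33441122 -- the two 16-bit halves are exchanged. The read helper
+ * below applies the same swap, which is its own inverse.
+ */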
+
+/*
+ * ======== read_ext32_bit_dsp_data ========
+ * Reads 32 bit data from the external memory
+ */
+extern inline u32 read_ext32_bit_dsp_data(const struct bridge_dev_context
+ *dev_context, u32 dsp_addr)
+{
+	u32 ret = *(u32 *) dsp_addr;
+
+	if (dev_context->tc_word_swap_on)
+		ret = ((ret << 16) & 0xFFFF0000) | ((ret >> 16) & 0x0000FFFF);
+	return ret;
+}
+
+#endif /* _TIOMAP_IO_ */
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
new file mode 100644
index 000000000000..e24ea0c73914
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -0,0 +1,160 @@
+/*
+ * ue_deh.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge DSP exception handling (DEH) functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ * Copyright (C) 2010 Felipe Contreras
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <plat/dmtimer.h>
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dev.h>
+#include "_tiomap.h"
+#include "_deh.h"
+
+#include <dspbridge/io_sm.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/wdt.h>
+
+int bridge_deh_create(struct deh_mgr **ret_deh,
+ struct dev_object *hdev_obj)
+{
+ int status;
+ struct deh_mgr *deh;
+ struct bridge_dev_context *hbridge_context = NULL;
+
+	/* Get Bridge context info. */
+	dev_get_bridge_context(hdev_obj, &hbridge_context);
+	/* Allocate DEH manager object */
+ deh = kzalloc(sizeof(*deh), GFP_KERNEL);
+ if (!deh) {
+ status = -ENOMEM;
+ goto err;
+ }
+
+ /* Create an NTFY object to manage notifications */
+ deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
+ if (!deh->ntfy_obj) {
+ status = -ENOMEM;
+ goto err;
+ }
+ ntfy_init(deh->ntfy_obj);
+
+ /* Fill in context structure */
+ deh->hbridge_context = hbridge_context;
+
+ *ret_deh = deh;
+ return 0;
+
+err:
+ bridge_deh_destroy(deh);
+ *ret_deh = NULL;
+ return status;
+}
+
+int bridge_deh_destroy(struct deh_mgr *deh)
+{
+ if (!deh)
+ return -EFAULT;
+
+ /* If notification object exists, delete it */
+ if (deh->ntfy_obj) {
+ ntfy_delete(deh->ntfy_obj);
+ kfree(deh->ntfy_obj);
+ }
+
+ /* Deallocate the DEH manager object */
+ kfree(deh);
+
+ return 0;
+}
+
+int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
+ u32 notify_type,
+ struct dsp_notification *hnotification)
+{
+ if (!deh)
+ return -EFAULT;
+
+ if (event_mask)
+ return ntfy_register(deh->ntfy_obj, hnotification,
+ event_mask, notify_type);
+ else
+ return ntfy_unregister(deh->ntfy_obj, hnotification);
+}
+
+static inline const char *event_to_string(int event)
+{
+ switch (event) {
+	case DSP_SYSERROR: return "DSP_SYSERROR";
+	case DSP_MMUFAULT: return "DSP_MMUFAULT";
+	case DSP_PWRERROR: return "DSP_PWRERROR";
+	case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW";
+	default: return "unknown event";
+ }
+}
+
+void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
+{
+ struct bridge_dev_context *dev_context;
+ const char *str = event_to_string(event);
+
+ if (!deh)
+ return;
+
+ dev_dbg(bridge, "%s: device exception", __func__);
+ dev_context = deh->hbridge_context;
+
+ switch (event) {
+ case DSP_SYSERROR:
+ dev_err(bridge, "%s: %s, info=0x%x", __func__,
+ str, info);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+ dump_dl_modules(dev_context);
+ dump_dsp_stack(dev_context);
+#endif
+ break;
+ case DSP_MMUFAULT:
+ dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info);
+ break;
+ default:
+ dev_err(bridge, "%s: %s", __func__, str);
+ break;
+ }
+
+ /* Filter subsequent notifications when an error occurs */
+ if (dev_context->dw_brd_state != BRD_ERROR) {
+ ntfy_notify(deh->ntfy_obj, event);
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ bridge_recover_schedule();
+#endif
+ }
+
+ /* Set the Board state as ERROR */
+ dev_context->dw_brd_state = BRD_ERROR;
+ /* Disable all the clocks that were enabled by DSP */
+ dsp_clock_disable_all(dev_context->dsp_per_clks);
+	/*
+	 * Once the WDT has fired, or when a fatal error has occurred,
+	 * keep it disabled to avoid a subsequent overflow.
+	 */
+ dsp_wdt_enable(false);
+}
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
new file mode 100644
index 000000000000..2126f5977530
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -0,0 +1,150 @@
+/*
+ * wdt.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * IO dispatcher for a shared memory channel driver.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/_chnl_sm.h>
+#include <dspbridge/wdt.h>
+#include <dspbridge/host_os.h>
+
+
+#ifdef CONFIG_TIDSPBRIDGE_WDT3
+
+#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000)
+
+static struct dsp_wdt_setting dsp_wdt;
+
+void dsp_wdt_dpc(unsigned long data)
+{
+ struct deh_mgr *deh_mgr;
+ dev_get_deh_mgr(dev_get_first(), &deh_mgr);
+ if (deh_mgr)
+ bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0);
+}
+
+irqreturn_t dsp_wdt_isr(int irq, void *data)
+{
+ u32 value;
+ /* ack wdt3 interrupt */
+ value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+ __raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+
+ tasklet_schedule(&dsp_wdt.wdt3_tasklet);
+ return IRQ_HANDLED;
+}
+
+int dsp_wdt_init(void)
+{
+ int ret = 0;
+
+ dsp_wdt.sm_wdt = NULL;
+ dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE);
+ tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
+
+	dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
+
+	/* clk_get() returns an ERR_PTR on failure, not NULL */
+	if (!IS_ERR(dsp_wdt.fclk)) {
+		dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
+		if (IS_ERR(dsp_wdt.iclk)) {
+			dsp_wdt.iclk = NULL;
+			clk_put(dsp_wdt.fclk);
+			dsp_wdt.fclk = NULL;
+			ret = -EFAULT;
+		}
+	} else {
+		dsp_wdt.fclk = NULL;
+		ret = -EFAULT;
+	}
+
+ if (!ret)
+ ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0,
+ "dsp_wdt", &dsp_wdt);
+
+ /* Disable at this moment, it will be enabled when DSP starts */
+ if (!ret)
+ disable_irq(INT_34XX_WDT3_IRQ);
+
+ return ret;
+}
+
+void dsp_wdt_sm_set(void *data)
+{
+ dsp_wdt.sm_wdt = data;
+ dsp_wdt.sm_wdt->wdt_overflow = CONFIG_TIDSPBRIDGE_WDT_TIMEOUT;
+}
+
+
+void dsp_wdt_exit(void)
+{
+ free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
+ tasklet_kill(&dsp_wdt.wdt3_tasklet);
+
+ if (dsp_wdt.fclk)
+ clk_put(dsp_wdt.fclk);
+ if (dsp_wdt.iclk)
+ clk_put(dsp_wdt.iclk);
+
+ dsp_wdt.fclk = NULL;
+ dsp_wdt.iclk = NULL;
+ dsp_wdt.sm_wdt = NULL;
+ dsp_wdt.reg_base = NULL;
+}
+
+void dsp_wdt_enable(bool enable)
+{
+ u32 tmp;
+ static bool wdt_enable;
+
+ if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk)
+ return;
+
+ wdt_enable = enable;
+
+ if (enable) {
+ clk_enable(dsp_wdt.fclk);
+ clk_enable(dsp_wdt.iclk);
+ dsp_wdt.sm_wdt->wdt_setclocks = 1;
+ tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+ __raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+ enable_irq(INT_34XX_WDT3_IRQ);
+ } else {
+ disable_irq(INT_34XX_WDT3_IRQ);
+ dsp_wdt.sm_wdt->wdt_setclocks = 0;
+ clk_disable(dsp_wdt.iclk);
+ clk_disable(dsp_wdt.fclk);
+ }
+}
+
+#else
+void dsp_wdt_enable(bool enable)
+{
+}
+
+void dsp_wdt_sm_set(void *data)
+{
+}
+
+int dsp_wdt_init(void)
+{
+ return 0;
+}
+
+void dsp_wdt_exit(void)
+{
+}
+#endif
+