-rw-r--r--  CREDITS | 2
-rw-r--r--  Documentation/ABI/stable/sysfs-bus-vmbus | 33
-rw-r--r--  Documentation/ABI/testing/debugfs-driver-habanalabs | 126
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-intel_th-output-devices | 6
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-habanalabs | 190
-rw-r--r--  Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/gnss/gnss.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gnss/mediatek.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/gnss/sirfstar.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interconnect/interconnect.txt | 60
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/misc/qcom,fastrpc.txt | 78
-rw-r--r--  Documentation/devicetree/bindings/nvmem/imx-ocotp.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r--  Documentation/driver-api/component.rst | 17
-rw-r--r--  Documentation/driver-api/device_link.rst | 3
-rw-r--r--  Documentation/driver-api/index.rst | 1
-rw-r--r--  Documentation/interconnect/interconnect.rst | 94
-rw-r--r--  MAINTAINERS | 21
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/m68k/Kconfig.machine | 2
-rw-r--r--  arch/m68k/atari/Makefile | 2
-rw-r--r--  arch/m68k/atari/nvram.c | 272
-rw-r--r--  arch/m68k/include/asm/atarihw.h | 6
-rw-r--r--  arch/m68k/include/asm/macintosh.h | 4
-rw-r--r--  arch/m68k/kernel/setup_mm.c | 82
-rw-r--r--  arch/m68k/mac/misc.c | 174
-rw-r--r--  arch/parisc/include/asm/io.h | 9
-rw-r--r--  arch/parisc/lib/iomap.c | 64
-rw-r--r--  arch/powerpc/Kconfig | 6
-rw-r--r--  arch/powerpc/include/asm/io.h | 2
-rw-r--r--  arch/powerpc/include/asm/nvram.h | 9
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 158
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 36
-rw-r--r--  arch/powerpc/platforms/chrp/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/chrp/nvram.c | 14
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/powermac/nvram.c | 9
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 3
-rw-r--r--  arch/powerpc/platforms/powermac/time.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 2
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/android/Kconfig | 2
-rw-r--r--  drivers/android/binder.c | 556
-rw-r--r--  drivers/android/binder_alloc.c | 303
-rw-r--r--  drivers/android/binder_alloc.h | 47
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 7
-rw-r--r--  drivers/android/binder_trace.h | 2
-rw-r--r--  drivers/base/component.c | 206
-rw-r--r--  drivers/char/Kconfig | 19
-rw-r--r--  drivers/char/Makefile | 6
-rw-r--r--  drivers/char/applicom.c | 35
-rw-r--r--  drivers/char/efirtc.c | 23
-rw-r--r--  drivers/char/generic_nvram.c | 159
-rw-r--r--  drivers/char/hpet.c | 9
-rw-r--r--  drivers/char/lp.c | 4
-rw-r--r--  drivers/char/mbcs.c | 1
-rw-r--r--  drivers/char/nvram.c | 673
-rw-r--r--  drivers/extcon/Kconfig | 8
-rw-r--r--  drivers/extcon/Makefile | 1
-rw-r--r--  drivers/extcon/extcon-ptn5150.c | 339
-rw-r--r--  drivers/fpga/Kconfig | 2
-rw-r--r--  drivers/fpga/altera-ps-spi.c | 2
-rw-r--r--  drivers/gnss/Kconfig | 13
-rw-r--r--  drivers/gnss/Makefile | 3
-rw-r--r--  drivers/gnss/core.c | 1
-rw-r--r--  drivers/gnss/mtk.c | 152
-rw-r--r--  drivers/gnss/sirf.c | 256
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 16
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 20
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 3
-rw-r--r--  drivers/hv/channel.c | 4
-rw-r--r--  drivers/hv/channel_mgmt.c | 18
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 4
-rw-r--r--  drivers/hv/ring_buffer.c | 14
-rw-r--r--  drivers/hv/vmbus_drv.c | 86
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 12
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c | 4
-rw-r--r--  drivers/hwtracing/intel_th/core.c | 6
-rw-r--r--  drivers/hwtracing/intel_th/gth.c | 4
-rw-r--r--  drivers/hwtracing/intel_th/pti.c | 16
-rw-r--r--  drivers/hwtracing/intel_th/sth.c | 4
-rw-r--r--  drivers/hwtracing/stm/core.c | 11
-rw-r--r--  drivers/interconnect/Kconfig | 15
-rw-r--r--  drivers/interconnect/Makefile | 6
-rw-r--r--  drivers/interconnect/core.c | 799
-rw-r--r--  drivers/interconnect/qcom/Kconfig | 13
-rw-r--r--  drivers/interconnect/qcom/Makefile | 5
-rw-r--r--  drivers/interconnect/qcom/sdm845.c | 838
-rw-r--r--  drivers/macintosh/via-cuda.c | 8
-rw-r--r--  drivers/misc/Kconfig | 12
-rw-r--r--  drivers/misc/Makefile | 2
-rw-r--r--  drivers/misc/ad525x_dpot.c | 24
-rw-r--r--  drivers/misc/cardreader/rts5227.c | 64
-rw-r--r--  drivers/misc/cardreader/rts5249.c | 32
-rw-r--r--  drivers/misc/cardreader/rts5260.c | 136
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 40
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.h | 5
-rw-r--r--  drivers/misc/enclosure.c | 4
-rw-r--r--  drivers/misc/fastrpc.c | 1401
-rw-r--r--  drivers/misc/habanalabs/Kconfig | 25
-rw-r--r--  drivers/misc/habanalabs/Makefile | 14
-rw-r--r--  drivers/misc/habanalabs/asid.c | 57
-rw-r--r--  drivers/misc/habanalabs/command_buffer.c | 445
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 780
-rw-r--r--  drivers/misc/habanalabs/context.c | 215
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 1077
-rw-r--r--  drivers/misc/habanalabs/device.c | 1140
-rw-r--r--  drivers/misc/habanalabs/goya/Makefile | 3
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 5391
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 211
-rw-r--r--  drivers/misc/habanalabs/goya/goya_hwmgr.c | 254
-rw-r--r--  drivers/misc/habanalabs/goya/goya_security.c | 2999
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 1464
-rw-r--r--  drivers/misc/habanalabs/habanalabs_drv.c | 461
-rw-r--r--  drivers/misc/habanalabs/habanalabs_ioctl.c | 234
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 635
-rw-r--r--  drivers/misc/habanalabs/hwmon.c | 458
-rw-r--r--  drivers/misc/habanalabs/include/armcp_if.h | 335
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h | 191
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h | 61
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h | 49
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h | 181
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h | 227
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h | 465
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h | 1372
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h | 275
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h | 118
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h | 653
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h | 331
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h | 331
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h | 331
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h | 331
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h | 331
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h | 331
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h | 373
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h | 1537
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h | 465
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h | 1153
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h | 143
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h | 53
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h | 227
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h | 243
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h | 447
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h | 745
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h | 143
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h | 83
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h | 83
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h | 83
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h | 83
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h | 83
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h | 117
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h | 55
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h | 1607
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h | 373
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h | 347
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h | 313
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h | 209
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h | 227
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h | 465
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h | 323
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h | 323
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h | 323
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h | 323
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h | 323
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h | 323
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h | 887
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h | 139
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h | 227
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h | 179
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h | 105
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya.h | 45
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_async_events.h | 186
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_fw_if.h | 28
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_packets.h | 129
-rw-r--r--  drivers/misc/habanalabs/include/hl_boot_if.h | 30
-rw-r--r--  drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h | 47
-rw-r--r--  drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h | 15
-rw-r--r--  drivers/misc/habanalabs/include/qman_if.h | 56
-rw-r--r--  drivers/misc/habanalabs/irq.c | 327
-rw-r--r--  drivers/misc/habanalabs/memory.c | 1723
-rw-r--r--  drivers/misc/habanalabs/mmu.c | 906
-rw-r--r--  drivers/misc/habanalabs/sysfs.c | 539
-rw-r--r--  drivers/misc/hpilo.c | 14
-rw-r--r--  drivers/misc/ics932s401.c | 2
-rw-r--r--  drivers/misc/lkdtm/core.c | 15
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 2
-rw-r--r--  drivers/misc/lkdtm/perms.c | 36
-rw-r--r--  drivers/misc/mei/Kconfig | 10
-rw-r--r--  drivers/misc/mei/Makefile | 2
-rw-r--r--  drivers/misc/mei/bus-fixup.c | 16
-rw-r--r--  drivers/misc/mei/bus.c | 22
-rw-r--r--  drivers/misc/mei/hbm.c | 7
-rw-r--r--  drivers/misc/mei/hdcp/Makefile | 7
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.c | 849
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.h | 377
-rw-r--r--  drivers/misc/mei/hw.h | 3
-rw-r--r--  drivers/misc/mic/Kconfig | 3
-rw-r--r--  drivers/misc/mic/bus/scif_bus.h | 8
-rw-r--r--  drivers/misc/mic/bus/vop_bus.h | 8
-rw-r--r--  drivers/misc/mic/card/mic_device.c | 8
-rw-r--r--  drivers/misc/mic/host/mic_boot.c | 8
-rw-r--r--  drivers/misc/mic/scif/scif_map.h | 4
-rw-r--r--  drivers/misc/mic/scif/scif_rma.c | 2
-rw-r--r--  drivers/misc/mic/vop/vop_main.c | 29
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c | 51
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 4
-rw-r--r--  drivers/misc/vmw_balloon.c | 24
-rw-r--r--  drivers/misc/vmw_vmci/vmci_doorbell.c | 9
-rw-r--r--  drivers/misc/vmw_vmci/vmci_doorbell.h | 2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.h | 2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 39
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 63
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.h | 4
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.h | 30
-rw-r--r--  drivers/ntb/hw/mscc/ntb_hw_switchtec.c | 36
-rw-r--r--  drivers/nvmem/Kconfig | 2
-rw-r--r--  drivers/nvmem/bcm-ocotp.c | 37
-rw-r--r--  drivers/nvmem/core.c | 42
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 13
-rw-r--r--  drivers/nvmem/sc27xx-efuse.c | 12
-rw-r--r--  drivers/parport/daisy.c | 32
-rw-r--r--  drivers/parport/parport_pc.c | 2
-rw-r--r--  drivers/parport/probe.c | 2
-rw-r--r--  drivers/parport/share.c | 10
-rw-r--r--  drivers/platform/goldfish/Kconfig | 4
-rw-r--r--  drivers/scsi/Kconfig | 6
-rw-r--r--  drivers/scsi/atari_scsi.c | 10
-rw-r--r--  drivers/slimbus/core.c | 45
-rw-r--r--  drivers/uio/uio.c | 16
-rw-r--r--  drivers/uio/uio_pci_generic.c | 17
-rw-r--r--  drivers/video/fbdev/Kconfig | 2
-rw-r--r--  drivers/video/fbdev/controlfb.c | 42
-rw-r--r--  drivers/video/fbdev/imsttfb.c | 23
-rw-r--r--  drivers/video/fbdev/matrox/matroxfb_base.c | 7
-rw-r--r--  drivers/video/fbdev/platinumfb.c | 21
-rw-r--r--  drivers/video/fbdev/valkyriefb.c | 30
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.c | 2
-rw-r--r--  include/asm-generic/iomap.h | 22
-rw-r--r--  include/drm/drm_audio_component.h | 1
-rw-r--r--  include/drm/drm_hdcp.h | 18
-rw-r--r--  include/drm/i915_component.h | 5
-rw-r--r--  include/drm/i915_drm.h | 15
-rw-r--r--  include/drm/i915_mei_hdcp_interface.h | 149
-rw-r--r--  include/dt-bindings/interconnect/qcom,sdm845.h | 143
-rw-r--r--  include/linux/component.h | 76
-rw-r--r--  include/linux/gnss.h | 1
-rw-r--r--  include/linux/hyperv.h | 144
-rw-r--r--  include/linux/interconnect-provider.h | 142
-rw-r--r--  include/linux/interconnect.h | 59
-rw-r--r--  include/linux/io-64-nonatomic-hi-lo.h | 64
-rw-r--r--  include/linux/io-64-nonatomic-lo-hi.h | 64
-rw-r--r--  include/linux/mei_cl_bus.h | 2
-rw-r--r--  include/linux/nvram.h | 133
-rw-r--r--  include/linux/parport.h | 13
-rw-r--r--  include/linux/vmw_vmci_defs.h | 7
-rw-r--r--  include/sound/hda_component.h | 5
-rw-r--r--  include/uapi/linux/android/binder.h | 19
-rw-r--r--  include/uapi/linux/pmu.h | 2
-rw-r--r--  include/uapi/misc/fastrpc.h | 41
-rw-r--r--  include/uapi/misc/habanalabs.h | 450
-rw-r--r--  lib/Kconfig.debug | 1
-rw-r--r--  lib/iomap.c | 140
-rwxr-xr-x  scripts/ver_linux | 6
-rw-r--r--  sound/hda/hdac_component.c | 4
-rw-r--r--  sound/hda/hdac_i915.c | 6
315 files changed, 60732 insertions, 1926 deletions
diff --git a/CREDITS b/CREDITS
index 0175098d4776..8e0342620a06 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1221,7 +1221,7 @@ S: Brazil
N: Oded Gabbay
E: oded.gabbay@gmail.com
-D: AMD KFD maintainer
+D: HabanaLabs and AMD KFD maintainer
S: 12 Shraga Raphaeli
S: Petah-Tikva, 4906418
S: Israel
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 3fed8fdb873d..826689dcc2e6 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -146,3 +146,36 @@ KernelVersion: 4.16
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Binary file created by uio_hv_generic for ring buffer
Users: Userspace drivers
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/intr_in_full
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Number of guest to host interrupts caused by the inbound ring
+ buffer transitioning from full to not full while a packet is
+ waiting for buffer space to become available
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/intr_out_empty
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Number of guest to host interrupts caused by the outbound ring
+ buffer transitioning from empty to not empty
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_full_first
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Number of write operations that were the first to encounter an
+ outbound ring buffer full condition
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_full_total
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Total number of write operations that encountered an outbound
+ ring buffer full condition
+Users: Debugging tools
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
new file mode 100644
index 000000000000..2f5b80be07a3
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -0,0 +1,126 @@
+What: /sys/kernel/debug/habanalabs/hl<n>/addr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the device address to be used for read or write through
+ PCI bar. The acceptable value is a string that starts with "0x"
+
+What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about the currently allocated
+ command buffers
+
+What: /sys/kernel/debug/habanalabs/hl<n>/command_submission
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about the currently active
+ command submissions
+
+What: /sys/kernel/debug/habanalabs/hl<n>/command_submission_jobs
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with detailed information about each JOB (CB) of
+ each active command submission
+
+What: /sys/kernel/debug/habanalabs/hl<n>/data32
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the root user to read or write directly through the
+ device's PCI bar. Writing to this file generates a write
+ transaction while reading from the file generates a read
+		transaction. This custom interface is needed (instead of using
+ the generic Linux user-space PCI mapping) because the DDR bar
+ is very small compared to the DDR memory and only the driver can
+ move the bar before and after the transaction
+
+What: /sys/kernel/debug/habanalabs/hl<n>/device
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Enables the root user to set the device to a specific state.
+ Valid values are "disable", "enable", "suspend", "resume".
+ User can read this property to see the valid values
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_addr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets I2C device address for I2C transaction that is generated
+ by the device's CPU
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_bus
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets I2C bus address for I2C transaction that is generated by
+ the device's CPU
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_data
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Triggers an I2C transaction that is generated by the device's
+ CPU. Writing to this file generates a write transaction while
+		reading from the file generates a read transaction
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_reg
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets I2C register id for I2C transaction that is generated by
+ the device's CPU
+
+What: /sys/kernel/debug/habanalabs/hl<n>/led0
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the state of the first S/W led on the device
+
+What: /sys/kernel/debug/habanalabs/hl<n>/led1
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the state of the second S/W led on the device
+
+What: /sys/kernel/debug/habanalabs/hl<n>/led2
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the state of the third S/W led on the device
+
+What: /sys/kernel/debug/habanalabs/hl<n>/mmu
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the hop values and physical address for a given ASID
+ and virtual address. The user should write the ASID and VA into
+ the file and then read the file to get the result.
+ e.g. to display info about VA 0x1000 for ASID 1 you need to do:
+ echo "1 0x1000" > /sys/kernel/debug/habanalabs/hl0/mmu
+
+What: /sys/kernel/debug/habanalabs/hl<n>/set_power_state
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the PCI power state. Valid values are "1" for D0 and "2"
+ for D3Hot
+
+What: /sys/kernel/debug/habanalabs/hl<n>/userptr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about the current user
+ pointers (user virtual addresses) that are pinned and mapped
+ to DMA addresses
+
+What: /sys/kernel/debug/habanalabs/hl<n>/vm
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about all the active virtual
+ address mappings per ASID
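For illustration, a minimal user-space sketch of the addr/data32 flow described above. The device index, the target address and the error handling are assumptions, not part of the driver itself:

/* Read one 32-bit word through the PCI bar of hl0: write the target device
 * address to "addr", then read "data32". Address 0x20000000 is an example. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *addr = "0x20000000";
	char buf[32];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/habanalabs/hl0/addr", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, addr, strlen(addr)) < 0)
		return 1;
	close(fd);

	fd = open("/sys/kernel/debug/habanalabs/hl0/data32", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* driver returns the value as text */
	if (n < 0)
		n = 0;
	buf[n] = '\0';
	printf("data32: %s\n", buf);
	close(fd);
	return 0;
}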
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
index 4d48a9451866..d1f667104944 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
@@ -3,11 +3,13 @@ Date: June 2015
KernelVersion: 4.3
Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Description: (RW) Writes of 1 or 0 enable or disable trace output to this
- output device. Reads return current status.
+ output device. Reads return current status. Requires that the
+		corresponding output port driver be loaded.
What: /sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/port
Date: June 2015
KernelVersion: 4.3
Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Description: (RO) Port number, corresponding to this output device on the
- switch (GTH).
+ switch (GTH) or "unassigned" if the corresponding output
+ port driver is not loaded.
diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs
new file mode 100644
index 000000000000..78b2bcf316a3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-habanalabs
@@ -0,0 +1,190 @@
+What: /sys/class/habanalabs/hl<n>/armcp_kernel_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Linux kernel running on the device's CPU
+
+What: /sys/class/habanalabs/hl<n>/armcp_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the application running on the device's CPU
+
+What: /sys/class/habanalabs/hl<n>/cpld_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Device's CPLD F/W
+
+What: /sys/class/habanalabs/hl<n>/device_type
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the code name of the device according to its type.
+ The supported values are: "GOYA"
+
+What: /sys/class/habanalabs/hl<n>/eeprom
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: A binary file attribute that contains the contents of the
+ on-board EEPROM
+
+What: /sys/class/habanalabs/hl<n>/fuse_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the device's version from the eFuse
+
+What: /sys/class/habanalabs/hl<n>/hard_reset
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Interface to trigger a hard-reset operation for the device.
+ Hard-reset will reset ALL internal components of the device
+ except for the PCI interface and the internal PLLs
+
+What: /sys/class/habanalabs/hl<n>/hard_reset_cnt
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays how many times the device has undergone a hard-reset
+ operation since the driver was loaded
+
+What: /sys/class/habanalabs/hl<n>/high_pll
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency for MME, TPC
+ and IC when the power management profile is set to "automatic".
+
+What: /sys/class/habanalabs/hl<n>/ic_clk
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency of the
+ Interconnect fabric. Writes to this parameter affect the device
+ only when the power management profile is set to "manual" mode.
+		The device IC clock might be set to a lower value than the
+ maximum. The user should read the ic_clk_curr to see the actual
+ frequency value of the IC
+
+What: /sys/class/habanalabs/hl<n>/ic_clk_curr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the current clock frequency of the Interconnect fabric
+
+What: /sys/class/habanalabs/hl<n>/infineon_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Device's power supply F/W code
+
+What: /sys/class/habanalabs/hl<n>/max_power
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum power consumption of the
+ device in milliwatts.
+
+What: /sys/class/habanalabs/hl<n>/mme_clk
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency of the
+ MME compute engine. Writes to this parameter affect the device
+ only when the power management profile is set to "manual" mode.
+		The device MME clock might be set to a lower value than the
+ maximum. The user should read the mme_clk_curr to see the actual
+ frequency value of the MME
+
+What: /sys/class/habanalabs/hl<n>/mme_clk_curr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the current clock frequency of the MME compute engine
+
+What: /sys/class/habanalabs/hl<n>/pci_addr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the PCI address of the device. This is needed so the
+ user would be able to open a device based on its PCI address
+
+What: /sys/class/habanalabs/hl<n>/pm_mng_profile
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Power management profile. Values are "auto", "manual". In "auto"
+ mode, the driver will set the maximum clock frequency to a high
+ value when a user-space process opens the device's file (unless
+ it was already opened by another process). The driver will set
+ the max clock frequency to a low value when there are no user
+		processes holding the device's file open. In "manual"
+ mode, the user sets the maximum clock frequency by writing to
+ ic_clk, mme_clk and tpc_clk
+
+
+What: /sys/class/habanalabs/hl<n>/preboot_btl_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the device's preboot F/W code
+
+What: /sys/class/habanalabs/hl<n>/soft_reset
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Interface to trigger a soft-reset operation for the device.
+ Soft-reset will reset only the compute and DMA engines of the
+ device
+
+What: /sys/class/habanalabs/hl<n>/soft_reset_cnt
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays how many times the device has undergone a soft-reset
+ operation since the driver was loaded
+
+What: /sys/class/habanalabs/hl<n>/status
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Status of the card: "Operational", "Malfunction", "In reset".
+
+What: /sys/class/habanalabs/hl<n>/thermal_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Device's thermal daemon
+
+What: /sys/class/habanalabs/hl<n>/tpc_clk
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency of the
+ TPC compute engines. Writes to this parameter affect the device
+ only when the power management profile is set to "manual" mode.
+		The device TPC clock might be set to a lower value than the
+ maximum. The user should read the tpc_clk_curr to see the actual
+ frequency value of the TPC
+
+What: /sys/class/habanalabs/hl<n>/tpc_clk_curr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the current clock frequency of the TPC compute engines
+
+What: /sys/class/habanalabs/hl<n>/uboot_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the u-boot running on the device's CPU
+
+What: /sys/class/habanalabs/hl<n>/write_open_cnt
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the total number of user processes that currently have
+		the device's file open
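For illustration, a short sketch of the manual power-management flow described above. The hl0 device index and the requested clock value (and its unit) are assumptions; mme_clk_curr reports the frequency that is actually in effect:

/* Switch hl0 to manual power management, then request an MME clock cap via
 * the sysfs attributes described above. The frequency value is illustrative. */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	if (write_attr("/sys/class/habanalabs/hl0/pm_mng_profile", "manual"))
		return 1;
	if (write_attr("/sys/class/habanalabs/hl0/mme_clk", "700000000"))
		return 1;
	/* read mme_clk_curr afterwards to see the applied frequency */
	return 0;
}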
diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt
new file mode 100644
index 000000000000..936fbdf12815
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt
@@ -0,0 +1,27 @@
+* PTN5150 CC (Configuration Channel) Logic device
+
+PTN5150 is a small thin low power CC logic chip supporting the USB Type-C
+connector application with CC control logic detection and indication functions.
+It is interfaced to the host controller using an I2C interface.
+
+Required properties:
+- compatible: should be "nxp,ptn5150"
+- reg: specifies the I2C slave address of the device
+- int-gpio: should contain a phandle and GPIO specifier for the GPIO pin
+ connected to the PTN5150's INTB pin.
+- vbus-gpio: should contain a phandle and GPIO specifier for the GPIO pin which
+ is used to control VBUS.
+- pinctrl-names : a pinctrl state named "default" must be defined.
+- pinctrl-0 : phandle referencing pin configuration of interrupt and vbus
+ control.
+
+Example:
+ ptn5150@1d {
+ compatible = "nxp,ptn5150";
+ reg = <0x1d>;
+ int-gpio = <&msmgpio 78 GPIO_ACTIVE_HIGH>;
+ vbus-gpio = <&msmgpio 148 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ptn5150_default>;
+ status = "okay";
+ };
diff --git a/Documentation/devicetree/bindings/gnss/gnss.txt b/Documentation/devicetree/bindings/gnss/gnss.txt
index f1e4a2ff47c5..f547bd4549fe 100644
--- a/Documentation/devicetree/bindings/gnss/gnss.txt
+++ b/Documentation/devicetree/bindings/gnss/gnss.txt
@@ -17,6 +17,7 @@ Required properties:
represents
Optional properties:
+- lna-supply : Separate supply for an LNA
- enable-gpios : GPIO used to enable the device
- timepulse-gpios : Time pulse GPIO
diff --git a/Documentation/devicetree/bindings/gnss/mediatek.txt b/Documentation/devicetree/bindings/gnss/mediatek.txt
new file mode 100644
index 000000000000..80cb802813c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/gnss/mediatek.txt
@@ -0,0 +1,35 @@
+Mediatek-based GNSS Receiver DT binding
+
+Mediatek chipsets are used in GNSS-receiver modules produced by several
+vendors and can use a UART interface.
+
+Please see Documentation/devicetree/bindings/gnss/gnss.txt for generic
+properties.
+
+Required properties:
+
+- compatible : Must be
+
+ "globaltop,pa6h"
+
+- vcc-supply : Main voltage regulator (pin name: VCC)
+
+Optional properties:
+
+- current-speed : Default UART baud rate
+- gnss-fix-gpios : GPIO used to determine device position fix state
+ (pin name: FIX, 3D_FIX)
+- reset-gpios : GPIO used to reset the device (pin name: RESET, NRESET)
+- timepulse-gpios : Time pulse GPIO (pin name: PPS1, 1PPS)
+- vbackup-supply : Backup voltage regulator (pin name: VBAT, VBACKUP)
+
+Example:
+
+serial@1234 {
+ compatible = "ns16550a";
+
+ gnss {
+ compatible = "globaltop,pa6h";
+ vcc-supply = <&vcc_3v3>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gnss/sirfstar.txt b/Documentation/devicetree/bindings/gnss/sirfstar.txt
index 648d183cdb77..f4252b6b660b 100644
--- a/Documentation/devicetree/bindings/gnss/sirfstar.txt
+++ b/Documentation/devicetree/bindings/gnss/sirfstar.txt
@@ -12,6 +12,7 @@ Required properties:
"fastrax,uc430"
"linx,r4"
+ "wi2wi,w2sg0004"
"wi2wi,w2sg0008i"
"wi2wi,w2sg0084i"
diff --git a/Documentation/devicetree/bindings/interconnect/interconnect.txt b/Documentation/devicetree/bindings/interconnect/interconnect.txt
new file mode 100644
index 000000000000..5a3c575b387a
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/interconnect.txt
@@ -0,0 +1,60 @@
+Interconnect Provider Device Tree Bindings
+=========================================
+
+The purpose of this document is to define a common set of generic interconnect
+providers/consumers properties.
+
+
+= interconnect providers =
+
+The interconnect provider binding is intended to represent the interconnect
+controllers in the system. Each provider registers a set of interconnect
+nodes, which expose the interconnect related capabilities of the interconnect
+to consumer drivers. These capabilities can be throughput, latency, priority
+etc. The consumer drivers set constraints on interconnect path (or endpoints)
+depending on the use case. Interconnect providers can also be interconnect
+consumers, such as in the case where two network-on-chip fabrics interface
+directly.
+
+Required properties:
+- compatible : contains the interconnect provider compatible string
+- #interconnect-cells : number of cells in a interconnect specifier needed to
+ encode the interconnect node id
+
+Example:
+
+ snoc: interconnect@580000 {
+ compatible = "qcom,msm8916-snoc";
+ #interconnect-cells = <1>;
+ reg = <0x580000 0x14000>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+ <&rpmcc RPM_SMD_SNOC_A_CLK>;
+ };
+
+
+= interconnect consumers =
+
+The interconnect consumers are device nodes which dynamically express their
+bandwidth requirements along interconnect paths they are connected to. There
+can be multiple interconnect providers on a SoC and the consumer may consume
+multiple paths from different providers depending on use case and the
+components it has to interact with.
+
+Required properties:
+interconnects : Pairs of phandles and interconnect provider specifiers to denote
+ the edge source and destination ports of the interconnect path.
+
+Optional properties:
+interconnect-names : List of interconnect path name strings sorted in the same
+	order as the interconnects property. Consumer drivers will use
+ interconnect-names to match interconnect paths with interconnect
+ specifier pairs.
+
+Example:
+
+ sdhci@7864000 {
+ ...
+ interconnects = <&pnoc MASTER_SDCC_1 &bimc SLAVE_EBI_CH0>;
+ interconnect-names = "sdhc-mem";
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
new file mode 100644
index 000000000000..5c4f1d911630
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
@@ -0,0 +1,24 @@
+Qualcomm SDM845 Network-On-Chip interconnect driver binding
+-----------------------------------------------------------
+
+SDM845 interconnect providers support system bandwidth requirements through
+RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
+able to communicate with the BCM through the Resource State Coordinator (RSC)
+associated with each execution environment. Provider nodes must reside within
+an RPMh device node pertaining to their RSC and each provider maps to a single
+RPMh resource.
+
+Required properties :
+- compatible : shall contain only one of the following:
+ "qcom,sdm845-rsc-hlos"
+- #interconnect-cells : should contain 1
+
+Examples:
+
+apps_rsc: rsc {
+ rsc_hlos: interconnect {
+ compatible = "qcom,sdm845-rsc-hlos";
+ #interconnect-cells = <1>;
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
new file mode 100644
index 000000000000..2a1827ab50d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
@@ -0,0 +1,78 @@
+Qualcomm Technologies, Inc. FastRPC Driver
+
+The FastRPC implements an IPC (Inter-Processor Communication)
+mechanism that allows for clients to transparently make remote method
+invocations across DSP and APPS boundaries. This enables developers
+to offload tasks to the DSP and free up the application processor for
+other tasks.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,fastrpc"
+
+- label
+ Usage: required
+ Value type: <string>
+ Definition: should specify the dsp domain name this fastrpc
+	corresponds to. Must be one of: "adsp", "mdsp", "sdsp", "cdsp"
+
+- #address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+= COMPUTE BANKS
+Each subnode of the Fastrpc represents compute context banks available
+on the dsp.
+- All Compute context banks MUST contain the following properties:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,fastrpc-compute-cb"
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Context Bank ID.
+
+- qcom,nsessions:
+ Usage: Optional
+ Value type: <u32>
+	Definition: A value indicating how many sessions can share this
+ context bank. Defaults to 1 when this property
+ is not specified.
+
+Example:
+
+adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+ ...
+ smd-edge {
+ label = "lpass";
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,smd-channels = "fastrpcsmd-apps-dsp";
+ label = "adsp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ };
+
+ cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ };
+ ...
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 792bc5fafeb9..7a999a135e56 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -1,7 +1,7 @@
Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL and i.MX6SLL SoCs.
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ and i.MX6SLL SoCs.
Required properties:
- compatible: should be one of
@@ -9,8 +9,10 @@ Required properties:
"fsl,imx6sl-ocotp" (i.MX6SL), or
"fsl,imx6sx-ocotp" (i.MX6SX),
"fsl,imx6ul-ocotp" (i.MX6UL),
+ "fsl,imx6ull-ocotp" (i.MX6ULL/ULZ),
"fsl,imx7d-ocotp" (i.MX7D/S),
"fsl,imx6sll-ocotp" (i.MX6SLL),
+ "fsl,imx7ulp-ocotp" (i.MX7ULP),
followed by "syscon".
- #address-cells : Should be 1
- #size-cells : Should be 1
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 8f574c778290..542bbf304f13 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -154,6 +154,7 @@ geniatech Geniatech, Inc.
giantec Giantec Semiconductor, Inc.
giantplus Giantplus Technology Co., Ltd.
globalscale Globalscale Technologies, Inc.
+globaltop GlobalTop Technology, Inc.
gmt Global Mixed-mode Technology, Inc.
goodix Shenzhen Huiding Technology Co., Ltd.
google Google, Inc.
diff --git a/Documentation/driver-api/component.rst b/Documentation/driver-api/component.rst
new file mode 100644
index 000000000000..2da4a8f20607
--- /dev/null
+++ b/Documentation/driver-api/component.rst
@@ -0,0 +1,17 @@
+======================================
+Component Helper for Aggregate Drivers
+======================================
+
+.. kernel-doc:: drivers/base/component.c
+ :doc: overview
+
+
+API
+===
+
+.. kernel-doc:: include/linux/component.h
+ :internal:
+
+.. kernel-doc:: drivers/base/component.c
+ :export:
+
diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
index d6763272e747..2d5919b2b337 100644
--- a/Documentation/driver-api/device_link.rst
+++ b/Documentation/driver-api/device_link.rst
@@ -1,6 +1,9 @@
.. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>`
.. |struct generic_pm_domain| replace:: :c:type:`struct generic_pm_domain <generic_pm_domain>`
+
+.. _device_link:
+
============
Device links
============
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index ab38ced66a44..c0b600ed9961 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -22,6 +22,7 @@ available subsections can be seen below.
device_connection
dma-buf
device_link
+ component
message-based
sound
frame-buffer
diff --git a/Documentation/interconnect/interconnect.rst b/Documentation/interconnect/interconnect.rst
new file mode 100644
index 000000000000..b8107dcc4cd3
--- /dev/null
+++ b/Documentation/interconnect/interconnect.rst
@@ -0,0 +1,94 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+GENERIC SYSTEM INTERCONNECT SUBSYSTEM
+=====================================
+
+Introduction
+------------
+
+This framework is designed to provide a standard kernel interface to control
+the settings of the interconnects on an SoC. These settings can be throughput,
+latency and priority between multiple interconnected devices or functional
+blocks. This can be controlled dynamically in order to save power or provide
+maximum performance.
+
+The interconnect bus is hardware with configurable parameters, which can be
+set on a data path according to the requests received from various drivers.
+Examples of interconnect buses are the interconnects between various
+components or functional blocks in chipsets. There can be multiple interconnects
+on an SoC that can be multi-tiered.
+
+Below is a simplified diagram of a real-world SoC interconnect bus topology.
+
+::
+
+ +----------------+ +----------------+
+ | HW Accelerator |--->| M NoC |<---------------+
+ +----------------+ +----------------+ |
+ | | +------------+
+ +-----+ +-------------+ V +------+ | |
+ | DDR | | +--------+ | PCIe | | |
+ +-----+ | | Slaves | +------+ | |
+ ^ ^ | +--------+ | | C NoC |
+ | | V V | |
+ +------------------+ +------------------------+ | | +-----+
+ | |-->| |-->| |-->| CPU |
+ | |-->| |<--| | +-----+
+ | Mem NoC | | S NoC | +------------+
+ | |<--| |---------+ |
+ | |<--| |<------+ | | +--------+
+ +------------------+ +------------------------+ | | +-->| Slaves |
+ ^ ^ ^ ^ ^ | | +--------+
+ | | | | | | V
+ +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
+ | CPUs | | | GPU | | DSP | | Masters |-->| P NoC |-->| Slaves |
+ +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
+ |
+ +-------+
+ | Modem |
+ +-------+
+
+Terminology
+-----------
+
+Interconnect provider is the software definition of the interconnect hardware.
+The interconnect providers on the above diagram are M NoC, S NoC, C NoC, P NoC
+and Mem NoC.
+
+Interconnect node is the software definition of the interconnect hardware
+port. Each interconnect provider consists of multiple interconnect nodes,
+which are connected to other SoC components including other interconnect
+providers. The point on the diagram where the CPUs connect to the memory is
+called an interconnect node, which belongs to the Mem NoC interconnect provider.
+
+Interconnect endpoints are the first or the last element of the path. Every
+endpoint is a node, but not every node is an endpoint.
+
+Interconnect path is everything between two endpoints including all the nodes
+that have to be traversed to reach from a source to destination node. It may
+include multiple master-slave pairs across several interconnect providers.
+
+Interconnect consumers are the entities which make use of the data paths exposed
+by the providers. The consumers send requests to providers requesting various
+throughput, latency and priority. Usually the consumers are device drivers that
+send requests based on their needs. An example of a consumer is a video decoder
+that supports various formats and image sizes.
+
+Interconnect providers
+----------------------
+
+Interconnect provider is an entity that implements methods to initialize and
+configure interconnect bus hardware. The interconnect provider drivers should
+be registered with the interconnect provider core.
+
+.. kernel-doc:: include/linux/interconnect-provider.h
+
+Interconnect consumers
+----------------------
+
+Interconnect consumers are the clients which use the interconnect APIs to
+get paths between endpoints and set their bandwidth/latency/QoS requirements
+for these interconnect paths.
+
+.. kernel-doc:: include/linux/interconnect.h
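For illustration, a minimal consumer sketch against the API above. The "sdhc-mem" path name (taken from the DT binding example earlier in this series) and the bandwidth values are illustrative:

/* Request bandwidth on a named interconnect path from a consumer driver.
 * "sdhc-mem" and the average/peak bandwidth values are assumptions. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

static struct icc_path *sdhc_path;

static int example_icc_setup(struct device *dev)
{
	int ret;

	/* look up the path named in the consumer's "interconnect-names" */
	sdhc_path = of_icc_get(dev, "sdhc-mem");
	if (IS_ERR(sdhc_path))
		return PTR_ERR(sdhc_path);

	/* average bandwidth, peak bandwidth */
	ret = icc_set_bw(sdhc_path, 100000, 200000);
	if (ret) {
		icc_put(sdhc_path);
		return ret;
	}

	return 0;
}

The path is held for as long as the constraint is needed and released with icc_put() when the consumer no longer cares about the bandwidth on it.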
diff --git a/MAINTAINERS b/MAINTAINERS
index d1559363898f..108f3b1b7a79 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6699,6 +6699,15 @@ F: drivers/clocksource/h8300_*.c
F: drivers/clk/h8300/
F: drivers/irqchip/irq-renesas-h8*.c
+HABANALABS PCI DRIVER
+M: Oded Gabbay <oded.gabbay@gmail.com>
+T: git https://github.com/HabanaAI/linux.git
+S: Supported
+F: drivers/misc/habanalabs/
+F: include/uapi/misc/habanalabs.h
+F: Documentation/ABI/testing/sysfs-driver-habanalabs
+F: Documentation/ABI/testing/debugfs-driver-habanalabs
+
HACKRF MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -7056,7 +7065,7 @@ M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
M: Sasha Levin <sashal@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
-L: devel@linuxdriverproject.org
+L: linux-hyperv@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/microsoft/netvsc.txt
F: arch/x86/include/asm/mshyperv.h
@@ -7941,6 +7950,16 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-intel-mid.c
+INTERCONNECT API
+M: Georgi Djakov <georgi.djakov@linaro.org>
+S: Maintained
+F: Documentation/interconnect/
+F: Documentation/devicetree/bindings/interconnect/
+F: drivers/interconnect/
+F: include/dt-bindings/interconnect/
+F: include/linux/interconnect-provider.h
+F: include/linux/interconnect.h
+
INVENSENSE MPU-3050 GYROSCOPE DRIVER
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-iio@vger.kernel.org
diff --git a/arch/Kconfig b/arch/Kconfig
index 3aff508ffd86..33687dddd86a 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -711,6 +711,9 @@ config HAVE_ARCH_HASH
file which provides platform-specific implementations of some
functions in <linux/hash.h> or fs/namei.c.
+config HAVE_ARCH_NVRAM_OPS
+ bool
+
config ISA_BUS_API
def_bool ISA
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 328ba83d735b..c01e103492fd 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -16,6 +16,7 @@ config ATARI
bool "Atari support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select HAVE_ARCH_NVRAM_OPS
help
This option enables support for the 68000-based Atari series of
computers (including the TT, Falcon and Medusa). If you plan to use
@@ -26,6 +27,7 @@ config MAC
bool "Macintosh support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select HAVE_ARCH_NVRAM_OPS
help
This option enables support for the Apple Macintosh series of
computers (yes, there is experimental support now, at least for part
diff --git a/arch/m68k/atari/Makefile b/arch/m68k/atari/Makefile
index 0cac723306f9..0b86bb6cfa87 100644
--- a/arch/m68k/atari/Makefile
+++ b/arch/m68k/atari/Makefile
@@ -6,3 +6,5 @@ obj-y := config.o time.o debug.o ataints.o stdma.o \
atasound.o stram.o
obj-$(CONFIG_ATARI_KBD_CORE) += atakeyb.o
+
+obj-$(CONFIG_NVRAM:m=y) += nvram.o
diff --git a/arch/m68k/atari/nvram.c b/arch/m68k/atari/nvram.c
new file mode 100644
index 000000000000..7000d2443aa3
--- /dev/null
+++ b/arch/m68k/atari/nvram.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CMOS/NV-RAM driver for Atari. Adapted from drivers/char/nvram.c.
+ * Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ * idea by and with help from Richard Jelinek <rj@suse.de>
+ * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+ * Further contributions from Cesar Barros, Erik Gilling, Tim Hockin and
+ * Wim Van Sebroeck.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/nvram.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+
+#define NVRAM_BYTES 50
+
+/* It is worth noting that these functions all access bytes of general
+ * purpose memory in the NVRAM - that is to say, they all add the
+ * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not
+ * know about the RTC cruft.
+ */
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
+ */
+
+static unsigned char __nvram_read_byte(int i)
+{
+ return CMOS_READ(NVRAM_FIRST_BYTE + i);
+}
+
+/* This races nicely with trying to read with checksum checking */
+static void __nvram_write_byte(unsigned char c, int i)
+{
+ CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
+}
+
+/* On Ataris, the checksum is over all bytes except the checksum bytes
+ * themselves; these are at the very end.
+ */
+#define ATARI_CKS_RANGE_START 0
+#define ATARI_CKS_RANGE_END 47
+#define ATARI_CKS_LOC 48
+
+static int __nvram_check_checksum(void)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
+ (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
+}
+
+static void __nvram_set_checksum(void)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(~sum, ATARI_CKS_LOC);
+ __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
+}
+
+long atari_nvram_set_checksum(void)
+{
+ spin_lock_irq(&rtc_lock);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+}
+
+long atari_nvram_initialize(void)
+{
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ __nvram_write_byte(0, i);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+}
+
+ssize_t atari_nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ *p = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
+
+ *ppos = i;
+ return p - buf;
+}
+
+ssize_t atari_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ __nvram_write_byte(*p, i);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+
+ *ppos = i;
+ return p - buf;
+}
+
+ssize_t atari_nvram_get_size(void)
+{
+ return NVRAM_BYTES;
+}
+
+#ifdef CONFIG_PROC_FS
+static struct {
+ unsigned char val;
+ const char *name;
+} boot_prefs[] = {
+ { 0x80, "TOS" },
+ { 0x40, "ASV" },
+ { 0x20, "NetBSD (?)" },
+ { 0x10, "Linux" },
+ { 0x00, "unspecified" },
+};
+
+static const char * const languages[] = {
+ "English (US)",
+ "German",
+ "French",
+ "English (UK)",
+ "Spanish",
+ "Italian",
+ "6 (undefined)",
+ "Swiss (French)",
+ "Swiss (German)",
+};
+
+static const char * const dateformat[] = {
+ "MM%cDD%cYY",
+ "DD%cMM%cYY",
+ "YY%cMM%cDD",
+ "YY%cDD%cMM",
+ "4 (undefined)",
+ "5 (undefined)",
+ "6 (undefined)",
+ "7 (undefined)",
+};
+
+static const char * const colors[] = {
+ "2", "4", "16", "256", "65536", "??", "??", "??"
+};
+
+static void atari_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
+{
+ int checksum;
+ int i;
+ unsigned int vmode;
+
+ spin_lock_irq(&rtc_lock);
+ checksum = __nvram_check_checksum();
+ spin_unlock_irq(&rtc_lock);
+
+ seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not ");
+
+ seq_puts(seq, "Boot preference : ");
+ for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i)
+ if (nvram[1] == boot_prefs[i].val) {
+ seq_printf(seq, "%s\n", boot_prefs[i].name);
+ break;
+ }
+ if (i < 0)
+ seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
+
+ seq_printf(seq, "SCSI arbitration : %s\n",
+ (nvram[16] & 0x80) ? "on" : "off");
+ seq_puts(seq, "SCSI host ID : ");
+ if (nvram[16] & 0x80)
+ seq_printf(seq, "%d\n", nvram[16] & 7);
+ else
+ seq_puts(seq, "n/a\n");
+
+ if (!MACH_IS_FALCON)
+ return;
+
+ seq_puts(seq, "OS language : ");
+ if (nvram[6] < ARRAY_SIZE(languages))
+ seq_printf(seq, "%s\n", languages[nvram[6]]);
+ else
+ seq_printf(seq, "%u (undefined)\n", nvram[6]);
+ seq_puts(seq, "Keyboard language: ");
+ if (nvram[7] < ARRAY_SIZE(languages))
+ seq_printf(seq, "%s\n", languages[nvram[7]]);
+ else
+ seq_printf(seq, "%u (undefined)\n", nvram[7]);
+ seq_puts(seq, "Date format : ");
+ seq_printf(seq, dateformat[nvram[8] & 7],
+ nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
+ seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
+ seq_puts(seq, "Boot delay : ");
+ if (nvram[10] == 0)
+ seq_puts(seq, "default\n");
+ else
+ seq_printf(seq, "%ds%s\n", nvram[10],
+ nvram[10] < 8 ? ", no memory test" : "");
+
+ vmode = (nvram[14] << 8) | nvram[15];
+ seq_printf(seq,
+ "Video mode : %s colors, %d columns, %s %s monitor\n",
+ colors[vmode & 7], vmode & 8 ? 80 : 40,
+ vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
+ seq_printf(seq,
+ " %soverscan, compat. mode %s%s\n",
+ vmode & 64 ? "" : "no ", vmode & 128 ? "on" : "off",
+ vmode & 256 ?
+ (vmode & 16 ? ", line doubling" : ", half screen") : "");
+}
+
+static int nvram_proc_read(struct seq_file *seq, void *offset)
+{
+ unsigned char contents[NVRAM_BYTES];
+ int i;
+
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ contents[i] = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
+
+ atari_nvram_proc_read(contents, seq, offset);
+
+ return 0;
+}
+
+static int __init atari_nvram_init(void)
+{
+ if (!(MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK)))
+ return -ENODEV;
+
+ if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
+ pr_err("nvram: can't create /proc/driver/nvram\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+device_initcall(atari_nvram_init);
+#endif /* CONFIG_PROC_FS */
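
[Editorial note, not part of the patch: atari_nvram_read() above copies at most count bytes, advances *ppos and returns the number of bytes copied, so a caller can loop until it returns 0 (end of NVRAM) or a negative errno (bad checksum). A minimal sketch of such a caller, assuming only the prototypes exported via <asm/atarihw.h>:]

	#include <linux/types.h>
	#include <linux/printk.h>
	#include <asm/atarihw.h>

	static void example_dump_atari_nvram(void)
	{
		char chunk[16];
		loff_t pos = 0;
		ssize_t n;

		/* Each successful call copies up to 16 bytes and advances pos. */
		while ((n = atari_nvram_read(chunk, sizeof(chunk), &pos)) > 0)
			print_hex_dump(KERN_INFO, "nvram: ", DUMP_PREFIX_NONE,
				       16, 1, chunk, n, false);
		if (n < 0)
			pr_err("nvram: read failed (%zd)\n", n); /* e.g. -EIO on bad checksum */
	}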
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 9000b249d225..533008262b69 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -33,6 +33,12 @@ extern int atari_dont_touch_floppy_select;
extern int atari_SCC_reset_done;
+extern ssize_t atari_nvram_read(char *, size_t, loff_t *);
+extern ssize_t atari_nvram_write(char *, size_t, loff_t *);
+extern ssize_t atari_nvram_get_size(void);
+extern long atari_nvram_set_checksum(void);
+extern long atari_nvram_initialize(void);
+
/* convenience macros for testing machine type */
#define MACH_IS_ST ((atari_mch_cookie >> 16) == ATARI_MCH_ST)
#define MACH_IS_STE ((atari_mch_cookie >> 16) == ATARI_MCH_STE && \
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 08cee11180e6..d9a08bed4b12 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -19,6 +19,10 @@ extern void mac_init_IRQ(void);
extern void mac_irq_enable(struct irq_data *data);
extern void mac_irq_disable(struct irq_data *data);
+extern unsigned char mac_pram_read_byte(int);
+extern void mac_pram_write_byte(unsigned char, int);
+extern ssize_t mac_pram_get_size(void);
+
/*
* Macintosh Table
*/
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index ad0195cbe042..528484feff80 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -24,6 +24,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
+#include <linux/nvram.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
@@ -37,13 +38,14 @@
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
-#ifdef CONFIG_ATARI
#include <asm/atarihw.h>
+#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#ifdef CONFIG_SUN3X
#include <asm/dvma.h>
#endif
+#include <asm/macintosh.h>
#include <asm/natfeat.h>
#if !FPSTATESIZE || !NR_IRQS
@@ -547,3 +549,81 @@ static int __init adb_probe_sync_enable (char *str) {
__setup("adb_sync", adb_probe_sync_enable);
#endif /* CONFIG_ADB */
+
+#if IS_ENABLED(CONFIG_NVRAM)
+#ifdef CONFIG_MAC
+static unsigned char m68k_nvram_read_byte(int addr)
+{
+ if (MACH_IS_MAC)
+ return mac_pram_read_byte(addr);
+ return 0xff;
+}
+
+static void m68k_nvram_write_byte(unsigned char val, int addr)
+{
+ if (MACH_IS_MAC)
+ mac_pram_write_byte(val, addr);
+}
+#endif /* CONFIG_MAC */
+
+#ifdef CONFIG_ATARI
+static ssize_t m68k_nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_read(buf, count, ppos);
+ else if (MACH_IS_MAC)
+ return nvram_read_bytes(buf, count, ppos);
+ return -EINVAL;
+}
+
+static ssize_t m68k_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_write(buf, count, ppos);
+ else if (MACH_IS_MAC)
+ return nvram_write_bytes(buf, count, ppos);
+ return -EINVAL;
+}
+
+static long m68k_nvram_set_checksum(void)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_set_checksum();
+ return -EINVAL;
+}
+
+static long m68k_nvram_initialize(void)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_initialize();
+ return -EINVAL;
+}
+#endif /* CONFIG_ATARI */
+
+static ssize_t m68k_nvram_get_size(void)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_get_size();
+ else if (MACH_IS_MAC)
+ return mac_pram_get_size();
+ return -ENODEV;
+}
+
+/* Atari device drivers call .read (to get checksum validation) whereas
+ * Mac and PowerMac device drivers just use .read_byte.
+ */
+const struct nvram_ops arch_nvram_ops = {
+#ifdef CONFIG_MAC
+ .read_byte = m68k_nvram_read_byte,
+ .write_byte = m68k_nvram_write_byte,
+#endif
+#ifdef CONFIG_ATARI
+ .read = m68k_nvram_read,
+ .write = m68k_nvram_write,
+ .set_checksum = m68k_nvram_set_checksum,
+ .initialize = m68k_nvram_initialize,
+#endif
+ .get_size = m68k_nvram_get_size,
+};
+EXPORT_SYMBOL(arch_nvram_ops);
+#endif /* CONFIG_NVRAM */
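
[Editorial note, not part of the patch: as the comment above says, a consumer of arch_nvram_ops would prefer the block .read op when present (Atari, with checksum validation) and otherwise fall back to byte-wise .read_byte (Mac). A sketch of that dispatch, assuming <linux/nvram.h> declares struct nvram_ops and arch_nvram_ops with the members used in the initializer above:]

	#include <linux/types.h>
	#include <linux/nvram.h>

	static ssize_t example_nvram_read(char *buf, size_t count, loff_t *ppos)
	{
		char *p = buf;
		ssize_t size = arch_nvram_ops.get_size();
		loff_t i;

		if (size < 0)
			return size;

		if (arch_nvram_ops.read)	/* Atari: checksum-validated block read */
			return arch_nvram_ops.read(buf, count, ppos);

		for (i = *ppos; count > 0 && i < size; --count, ++i, ++p)
			*p = arch_nvram_ops.read_byte(i);	/* Mac: byte-wise PRAM access */
		*ppos = i;
		return p - buf;
	}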
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 1423e1fe0261..90f4e9ca1276 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -36,8 +36,9 @@
static void (*rom_reset)(void);
+#if IS_ENABLED(CONFIG_NVRAM)
#ifdef CONFIG_ADB_CUDA
-static __u8 cuda_read_pram(int offset)
+static unsigned char cuda_pram_read_byte(int offset)
{
struct adb_request req;
@@ -49,7 +50,7 @@ static __u8 cuda_read_pram(int offset)
return req.reply[3];
}
-static void cuda_write_pram(int offset, __u8 data)
+static void cuda_pram_write_byte(unsigned char data, int offset)
{
struct adb_request req;
@@ -62,29 +63,29 @@ static void cuda_write_pram(int offset, __u8 data)
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
-static __u8 pmu_read_pram(int offset)
+static unsigned char pmu_pram_read_byte(int offset)
{
struct adb_request req;
- if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM,
- (offset >> 8) & 0xFF, offset & 0xFF) < 0)
+ if (pmu_request(&req, NULL, 3, PMU_READ_XPRAM,
+ offset & 0xFF, 1) < 0)
return 0;
- while (!req.complete)
- pmu_poll();
- return req.reply[3];
+ pmu_wait_complete(&req);
+
+ return req.reply[0];
}
-static void pmu_write_pram(int offset, __u8 data)
+static void pmu_pram_write_byte(unsigned char data, int offset)
{
struct adb_request req;
- if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM,
- (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
+ if (pmu_request(&req, NULL, 4, PMU_WRITE_XPRAM,
+ offset & 0xFF, 1, data) < 0)
return;
- while (!req.complete)
- pmu_poll();
+ pmu_wait_complete(&req);
}
#endif /* CONFIG_ADB_PMU */
+#endif /* CONFIG_NVRAM */
/*
* VIA PRAM/RTC access routines
@@ -93,7 +94,7 @@ static void pmu_write_pram(int offset, __u8 data)
* the RTC should be enabled.
*/
-static __u8 via_pram_readbyte(void)
+static __u8 via_rtc_recv(void)
{
int i, reg;
__u8 data;
@@ -120,7 +121,7 @@ static __u8 via_pram_readbyte(void)
return data;
}
-static void via_pram_writebyte(__u8 data)
+static void via_rtc_send(__u8 data)
{
int i, reg, bit;
@@ -137,6 +138,31 @@ static void via_pram_writebyte(__u8 data)
}
/*
+ * These values can be found in Inside Macintosh vol. III ch. 2
+ * which has a description of the RTC chip in the original Mac.
+ */
+
+#define RTC_FLG_READ BIT(7)
+#define RTC_FLG_WRITE_PROTECT BIT(7)
+#define RTC_CMD_READ(r) (RTC_FLG_READ | (r << 2))
+#define RTC_CMD_WRITE(r) (r << 2)
+#define RTC_REG_SECONDS_0 0
+#define RTC_REG_SECONDS_1 1
+#define RTC_REG_SECONDS_2 2
+#define RTC_REG_SECONDS_3 3
+#define RTC_REG_WRITE_PROTECT 13
+
+/*
+ * Inside Mac has no information about two-byte RTC commands but
+ * the MAME/MESS source code has the essentials.
+ */
+
+#define RTC_REG_XPRAM 14
+#define RTC_CMD_XPRAM_READ (RTC_CMD_READ(RTC_REG_XPRAM) << 8)
+#define RTC_CMD_XPRAM_WRITE (RTC_CMD_WRITE(RTC_REG_XPRAM) << 8)
+#define RTC_CMD_XPRAM_ARG(a) (((a & 0xE0) << 3) | ((a & 0x1F) << 2))
+
+/*
* Execute a VIA PRAM/RTC command. For read commands
* data should point to a one-byte buffer for the
* resulting data. For write commands it should point
@@ -145,29 +171,33 @@ static void via_pram_writebyte(__u8 data)
* This function disables all interrupts while running.
*/
-static void via_pram_command(int command, __u8 *data)
+static void via_rtc_command(int command, __u8 *data)
{
unsigned long flags;
int is_read;
local_irq_save(flags);
+ /* The least significant bits must be 0b01 according to Inside Mac */
+
+ command = (command & ~3) | 1;
+
/* Enable the RTC and make sure the strobe line is high */
via1[vBufB] = (via1[vBufB] | VIA1B_vRTCClk) & ~VIA1B_vRTCEnb;
if (command & 0xFF00) { /* extended (two-byte) command */
- via_pram_writebyte((command & 0xFF00) >> 8);
- via_pram_writebyte(command & 0xFF);
- is_read = command & 0x8000;
+ via_rtc_send((command & 0xFF00) >> 8);
+ via_rtc_send(command & 0xFF);
+ is_read = command & (RTC_FLG_READ << 8);
} else { /* one-byte command */
- via_pram_writebyte(command);
- is_read = command & 0x80;
+ via_rtc_send(command);
+ is_read = command & RTC_FLG_READ;
}
if (is_read) {
- *data = via_pram_readbyte();
+ *data = via_rtc_recv();
} else {
- via_pram_writebyte(*data);
+ via_rtc_send(*data);
}
/* All done, disable the RTC */
@@ -177,14 +207,30 @@ static void via_pram_command(int command, __u8 *data)
local_irq_restore(flags);
}
-static __u8 via_read_pram(int offset)
+#if IS_ENABLED(CONFIG_NVRAM)
+static unsigned char via_pram_read_byte(int offset)
{
- return 0;
+ unsigned char temp;
+
+ via_rtc_command(RTC_CMD_XPRAM_READ | RTC_CMD_XPRAM_ARG(offset), &temp);
+
+ return temp;
}
-static void via_write_pram(int offset, __u8 data)
+static void via_pram_write_byte(unsigned char data, int offset)
{
+ unsigned char temp;
+
+ temp = 0x55;
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
+
+ temp = data;
+ via_rtc_command(RTC_CMD_XPRAM_WRITE | RTC_CMD_XPRAM_ARG(offset), &temp);
+
+ temp = 0x55 | RTC_FLG_WRITE_PROTECT;
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
}
+#endif /* CONFIG_NVRAM */
/*
* Return the current time in seconds since January 1, 1904.
@@ -201,10 +247,10 @@ static time64_t via_read_time(void)
} result, last_result;
int count = 1;
- via_pram_command(0x81, &last_result.cdata[3]);
- via_pram_command(0x85, &last_result.cdata[2]);
- via_pram_command(0x89, &last_result.cdata[1]);
- via_pram_command(0x8D, &last_result.cdata[0]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0), &last_result.cdata[3]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1), &last_result.cdata[2]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2), &last_result.cdata[1]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3), &last_result.cdata[0]);
/*
* The NetBSD guys say to loop until you get the same reading
@@ -212,10 +258,14 @@ static time64_t via_read_time(void)
*/
while (1) {
- via_pram_command(0x81, &result.cdata[3]);
- via_pram_command(0x85, &result.cdata[2]);
- via_pram_command(0x89, &result.cdata[1]);
- via_pram_command(0x8D, &result.cdata[0]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0),
+ &result.cdata[3]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1),
+ &result.cdata[2]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2),
+ &result.cdata[1]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3),
+ &result.cdata[0]);
if (result.idata == last_result.idata)
return (time64_t)result.idata - RTC_OFFSET;
@@ -254,18 +304,18 @@ static void via_set_rtc_time(struct rtc_time *tm)
/* Clear the write protect bit */
temp = 0x55;
- via_pram_command(0x35, &temp);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
data.idata = lower_32_bits(time + RTC_OFFSET);
- via_pram_command(0x01, &data.cdata[3]);
- via_pram_command(0x05, &data.cdata[2]);
- via_pram_command(0x09, &data.cdata[1]);
- via_pram_command(0x0D, &data.cdata[0]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_0), &data.cdata[3]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_1), &data.cdata[2]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_2), &data.cdata[1]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_3), &data.cdata[0]);
/* Set the write protect bit */
- temp = 0xD5;
- via_pram_command(0x35, &temp);
+ temp = 0x55 | RTC_FLG_WRITE_PROTECT;
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
}
static void via_shutdown(void)
@@ -326,66 +376,58 @@ static void cuda_shutdown(void)
*-------------------------------------------------------------------
*/
-void mac_pram_read(int offset, __u8 *buffer, int len)
+#if IS_ENABLED(CONFIG_NVRAM)
+unsigned char mac_pram_read_byte(int addr)
{
- __u8 (*func)(int);
- int i;
-
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
case MAC_ADB_II:
case MAC_ADB_PB1:
- func = via_read_pram;
- break;
+ return via_pram_read_byte(addr);
#ifdef CONFIG_ADB_CUDA
case MAC_ADB_EGRET:
case MAC_ADB_CUDA:
- func = cuda_read_pram;
- break;
+ return cuda_pram_read_byte(addr);
#endif
#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
- func = pmu_read_pram;
- break;
+ return pmu_pram_read_byte(addr);
#endif
default:
- return;
- }
- for (i = 0 ; i < len ; i++) {
- buffer[i] = (*func)(offset++);
+ return 0xFF;
}
}
-void mac_pram_write(int offset, __u8 *buffer, int len)
+void mac_pram_write_byte(unsigned char val, int addr)
{
- void (*func)(int, __u8);
- int i;
-
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
case MAC_ADB_II:
case MAC_ADB_PB1:
- func = via_write_pram;
+ via_pram_write_byte(val, addr);
break;
#ifdef CONFIG_ADB_CUDA
case MAC_ADB_EGRET:
case MAC_ADB_CUDA:
- func = cuda_write_pram;
+ cuda_pram_write_byte(val, addr);
break;
#endif
#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
- func = pmu_write_pram;
+ pmu_pram_write_byte(val, addr);
break;
#endif
default:
- return;
- }
- for (i = 0 ; i < len ; i++) {
- (*func)(offset++, buffer[i]);
+ break;
}
}
+ssize_t mac_pram_get_size(void)
+{
+ return 256;
+}
+#endif /* CONFIG_NVRAM */
+
void mac_poweroff(void)
{
if (oss_present) {
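
[Editorial note, not part of the patch: a worked example of the XPRAM command encoding defined above, for an arbitrary XPRAM address 0x42. RTC_CMD_READ(RTC_REG_XPRAM) = 0x80 | (14 << 2) = 0xB8, so RTC_CMD_XPRAM_READ = 0xB800; RTC_CMD_XPRAM_ARG(0x42) = ((0x42 & 0xE0) << 3) | ((0x42 & 0x1F) << 2) = 0x208. The combined command 0xBA08 has its low bits forced to 0b01 by via_rtc_command(), so the bytes clocked out to the RTC are 0xBA and 0x09, followed by one byte read back.]

	#include <asm/macintosh.h>

	static unsigned char example_read_xpram_byte(void)
	{
		/* On a VIA-based Mac this lands in via_pram_read_byte(0x42),
		 * which issues the two-byte 0xBA09 command computed above. */
		return mac_pram_read_byte(0x42);
	}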
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index afe493b23d04..30a8315d5c07 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -311,6 +311,15 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
* value for either 32 or 64 bit mode */
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
+#define ioread64 ioread64
+#define ioread64be ioread64be
+#define iowrite64 iowrite64
+#define iowrite64be iowrite64be
+extern u64 ioread64(void __iomem *addr);
+extern u64 ioread64be(void __iomem *addr);
+extern void iowrite64(u64 val, void __iomem *addr);
+extern void iowrite64be(u64 val, void __iomem *addr);
+
#include <asm-generic/iomap.h>
/*
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 4b19e6e64fb7..0195aec657e2 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -48,11 +48,15 @@ struct iomap_ops {
unsigned int (*read16be)(void __iomem *);
unsigned int (*read32)(void __iomem *);
unsigned int (*read32be)(void __iomem *);
+ u64 (*read64)(void __iomem *);
+ u64 (*read64be)(void __iomem *);
void (*write8)(u8, void __iomem *);
void (*write16)(u16, void __iomem *);
void (*write16be)(u16, void __iomem *);
void (*write32)(u32, void __iomem *);
void (*write32be)(u32, void __iomem *);
+ void (*write64)(u64, void __iomem *);
+ void (*write64be)(u64, void __iomem *);
void (*read8r)(void __iomem *, void *, unsigned long);
void (*read16r)(void __iomem *, void *, unsigned long);
void (*read32r)(void __iomem *, void *, unsigned long);
@@ -171,6 +175,16 @@ static unsigned int iomem_read32be(void __iomem *addr)
return __raw_readl(addr);
}
+static u64 iomem_read64(void __iomem *addr)
+{
+ return readq(addr);
+}
+
+static u64 iomem_read64be(void __iomem *addr)
+{
+ return __raw_readq(addr);
+}
+
static void iomem_write8(u8 datum, void __iomem *addr)
{
writeb(datum, addr);
@@ -196,6 +210,16 @@ static void iomem_write32be(u32 datum, void __iomem *addr)
__raw_writel(datum, addr);
}
+static void iomem_write64(u64 datum, void __iomem *addr)
+{
+ writeq(datum, addr);
+}
+
+static void iomem_write64be(u64 datum, void __iomem *addr)
+{
+ __raw_writeq(datum, addr);
+}
+
static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
{
while (count--) {
@@ -250,11 +274,15 @@ static const struct iomap_ops iomem_ops = {
.read16be = iomem_read16be,
.read32 = iomem_read32,
.read32be = iomem_read32be,
+ .read64 = iomem_read64,
+ .read64be = iomem_read64be,
.write8 = iomem_write8,
.write16 = iomem_write16,
.write16be = iomem_write16be,
.write32 = iomem_write32,
.write32be = iomem_write32be,
+ .write64 = iomem_write64,
+ .write64be = iomem_write64be,
.read8r = iomem_read8r,
.read16r = iomem_read16r,
.read32r = iomem_read32r,
@@ -304,6 +332,20 @@ unsigned int ioread32be(void __iomem *addr)
return *((u32 *)addr);
}
+u64 ioread64(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr);
+ return le64_to_cpup((u64 *)addr);
+}
+
+u64 ioread64be(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr);
+ return *((u64 *)addr);
+}
+
void iowrite8(u8 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
@@ -349,6 +391,24 @@ void iowrite32be(u32 datum, void __iomem *addr)
}
}
+void iowrite64(u64 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write64(datum, addr);
+ } else {
+ *((u64 *)addr) = cpu_to_le64(datum);
+ }
+}
+
+void iowrite64be(u64 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write64be(datum, addr);
+ } else {
+ *((u64 *)addr) = datum;
+ }
+}
+
/* Repeating interfaces */
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
@@ -449,11 +509,15 @@ EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
+EXPORT_SYMBOL(ioread64);
+EXPORT_SYMBOL(ioread64be);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
+EXPORT_SYMBOL(iowrite64);
+EXPORT_SYMBOL(iowrite64be);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
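
[Editorial note, not part of the patch: a hedged usage sketch of the new 64-bit accessors on an ioremap()ed region; the register offset below is made up for illustration.]

	#include <linux/types.h>
	#include <linux/io.h>

	#define EXAMPLE_STATUS_REG	0x18	/* hypothetical 64-bit, big-endian register */

	static u64 example_read_status(void __iomem *base)
	{
		return ioread64be(base + EXAMPLE_STATUS_REG);
	}

	static void example_clear_status(void __iomem *base)
	{
		iowrite64be(0, base + EXAMPLE_STATUS_REG);
	}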
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 375d0dc0dc7d..7deb3ea2dd3f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -179,6 +179,7 @@ config PPC
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_NVRAM_OPS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
@@ -275,11 +276,6 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-# All PPC32s use generic nvram driver through ppc_md
-config GENERIC_NVRAM
- bool
- default y if PPC32
-
config SCHED_OMIT_FRAME_POINTER
bool
default y
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 7f19fbd3ba55..4b73847e9b95 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -783,8 +783,10 @@ extern void __iounmap_at(void *ea, unsigned long size);
#define mmio_read16be(addr) readw_be(addr)
#define mmio_read32be(addr) readl_be(addr)
+#define mmio_read64be(addr) readq_be(addr)
#define mmio_write16be(val, addr) writew_be(val, addr)
#define mmio_write32be(val, addr) writel_be(val, addr)
+#define mmio_write64be(val, addr) writeq_be(val, addr)
#define mmio_insb(addr, dst, count) readsb(addr, dst, count)
#define mmio_insw(addr, dst, count) readsw(addr, dst, count)
#define mmio_insl(addr, dst, count) readsl(addr, dst, count)
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 09a518bb7c03..629a5cdcc865 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -78,9 +78,6 @@ extern int pmac_get_partition(int partition);
extern u8 pmac_xpram_read(int xpaddr);
extern void pmac_xpram_write(int xpaddr, u8 data);
-/* Synchronize NVRAM */
-extern void nvram_sync(void);
-
/* Initialize NVRAM OS partition */
extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
@@ -98,10 +95,4 @@ extern int nvram_write_os_partition(struct nvram_os_partition *part,
unsigned int err_type,
unsigned int error_log_cnt);
-/* Determine NVRAM size */
-extern ssize_t nvram_get_size(void);
-
-/* Normal access to NVRAM */
-extern unsigned char nvram_read_byte(int i);
-extern void nvram_write_byte(unsigned char c, int i);
#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 38b03a330cd2..244d2462e781 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -7,12 +7,6 @@
* 2 of the License, or (at your option) any later version.
*
* /dev/nvram driver for PPC64
- *
- * This perhaps should live in drivers/char
- *
- * TODO: Split the /dev/nvram part (that one can use
- * drivers/char/generic_nvram.c) from the arch & partition
- * parsing code.
*/
#include <linux/types.h>
@@ -714,137 +708,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
spin_unlock_irqrestore(&lock, flags);
}
-static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
-{
- if (ppc_md.nvram_size == NULL)
- return -ENODEV;
- return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
- ppc_md.nvram_size());
-}
-
-
-static ssize_t dev_nvram_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
- char *tmp = NULL;
- ssize_t size;
-
- if (!ppc_md.nvram_size) {
- ret = -ENODEV;
- goto out;
- }
-
- size = ppc_md.nvram_size();
- if (size < 0) {
- ret = size;
- goto out;
- }
-
- if (*ppos >= size) {
- ret = 0;
- goto out;
- }
-
- count = min_t(size_t, count, size - *ppos);
- count = min(count, PAGE_SIZE);
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = ppc_md.nvram_read(tmp, count, ppos);
- if (ret <= 0)
- goto out;
-
- if (copy_to_user(buf, tmp, ret))
- ret = -EFAULT;
-
-out:
- kfree(tmp);
- return ret;
-
-}
-
-static ssize_t dev_nvram_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
- char *tmp = NULL;
- ssize_t size;
-
- ret = -ENODEV;
- if (!ppc_md.nvram_size)
- goto out;
-
- ret = 0;
- size = ppc_md.nvram_size();
- if (*ppos >= size || size < 0)
- goto out;
-
- count = min_t(size_t, count, size - *ppos);
- count = min(count, PAGE_SIZE);
-
- tmp = memdup_user(buf, count);
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto out;
- }
-
- ret = ppc_md.nvram_write(tmp, count, ppos);
-
- kfree(tmp);
-out:
- return ret;
-}
-
-static long dev_nvram_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- switch(cmd) {
-#ifdef CONFIG_PPC_PMAC
- case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
- printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
- /* fall through */
- case IOC_NVRAM_GET_OFFSET: {
- int part, offset;
-
- if (!machine_is(powermac))
- return -EINVAL;
- if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
- return -EFAULT;
- if (part < pmac_nvram_OF || part > pmac_nvram_NR)
- return -EINVAL;
- offset = pmac_get_partition(part);
- if (offset < 0)
- return offset;
- if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
- return -EFAULT;
- return 0;
- }
-#endif /* CONFIG_PPC_PMAC */
- default:
- return -EINVAL;
- }
-}
-
-static const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = dev_nvram_llseek,
- .read = dev_nvram_read,
- .write = dev_nvram_write,
- .unlocked_ioctl = dev_nvram_ioctl,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-
#ifdef DEBUG_NVRAM
static void __init nvram_print_partitions(char * label)
{
@@ -992,6 +855,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
long size = 0;
int rc;
+ BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
+
/* Convert sizes from bytes to blocks */
req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
@@ -1192,22 +1057,3 @@ int __init nvram_scan_partitions(void)
kfree(header);
return err;
}
-
-static int __init nvram_init(void)
-{
- int rc;
-
- BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
-
- if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
- return -ENODEV;
-
- rc = misc_register(&nvram_dev);
- if (rc != 0) {
- printk(KERN_ERR "nvram_init: failed to register device\n");
- return rc;
- }
-
- return rc;
-}
-device_initcall(nvram_init);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 947f904688b0..c31082233a25 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -17,6 +17,7 @@
#include <linux/console.h>
#include <linux/memblock.h>
#include <linux/export.h>
+#include <linux/nvram.h>
#include <asm/io.h>
#include <asm/prom.h>
@@ -147,41 +148,6 @@ static int __init ppc_setup_l3cr(char *str)
}
__setup("l3cr=", ppc_setup_l3cr);
-#ifdef CONFIG_GENERIC_NVRAM
-
-/* Generic nvram hooks used by drivers/char/gen_nvram.c */
-unsigned char nvram_read_byte(int addr)
-{
- if (ppc_md.nvram_read_val)
- return ppc_md.nvram_read_val(addr);
- return 0xff;
-}
-EXPORT_SYMBOL(nvram_read_byte);
-
-void nvram_write_byte(unsigned char val, int addr)
-{
- if (ppc_md.nvram_write_val)
- ppc_md.nvram_write_val(addr, val);
-}
-EXPORT_SYMBOL(nvram_write_byte);
-
-ssize_t nvram_get_size(void)
-{
- if (ppc_md.nvram_size)
- return ppc_md.nvram_size();
- return -1;
-}
-EXPORT_SYMBOL(nvram_get_size);
-
-void nvram_sync(void)
-{
- if (ppc_md.nvram_sync)
- ppc_md.nvram_sync();
-}
-EXPORT_SYMBOL(nvram_sync);
-
-#endif /* CONFIG_NVRAM */
-
static int __init ppc_init(void)
{
/* clear the progress line */
diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile
index 4b3bfadc70fa..dc3465cc8bc6 100644
--- a/arch/powerpc/platforms/chrp/Makefile
+++ b/arch/powerpc/platforms/chrp/Makefile
@@ -1,3 +1,3 @@
obj-y += setup.o time.o pegasos_eth.o pci.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_NVRAM) += nvram.o
+obj-$(CONFIG_NVRAM:m=y) += nvram.o
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index 791b86398e1d..37ac20ccbb19 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -24,7 +24,7 @@ static unsigned int nvram_size;
static unsigned char nvram_buf[4];
static DEFINE_SPINLOCK(nvram_lock);
-static unsigned char chrp_nvram_read(int addr)
+static unsigned char chrp_nvram_read_val(int addr)
{
unsigned int done;
unsigned long flags;
@@ -46,7 +46,7 @@ static unsigned char chrp_nvram_read(int addr)
return ret;
}
-static void chrp_nvram_write(int addr, unsigned char val)
+static void chrp_nvram_write_val(int addr, unsigned char val)
{
unsigned int done;
unsigned long flags;
@@ -64,6 +64,11 @@ static void chrp_nvram_write(int addr, unsigned char val)
spin_unlock_irqrestore(&nvram_lock, flags);
}
+static ssize_t chrp_nvram_size(void)
+{
+ return nvram_size;
+}
+
void __init chrp_nvram_init(void)
{
struct device_node *nvram;
@@ -85,8 +90,9 @@ void __init chrp_nvram_init(void)
printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
of_node_put(nvram);
- ppc_md.nvram_read_val = chrp_nvram_read;
- ppc_md.nvram_write_val = chrp_nvram_write;
+ ppc_md.nvram_read_val = chrp_nvram_read_val;
+ ppc_md.nvram_write_val = chrp_nvram_write_val;
+ ppc_md.nvram_size = chrp_nvram_size;
return;
}
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 9438fa0fc355..fcf6f2342ef4 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -549,7 +549,7 @@ static void __init chrp_init_IRQ(void)
static void __init
chrp_init2(void)
{
-#ifdef CONFIG_NVRAM
+#if IS_ENABLED(CONFIG_NVRAM)
chrp_nvram_init();
#endif
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 923bfb340433..20ebf35d7913 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -15,7 +15,5 @@ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really
# CONFIG_NVRAM=y
obj-$(CONFIG_NVRAM:m=y) += nvram.o
-# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
-obj-$(CONFIG_PPC64) += nvram.o
obj-$(CONFIG_PPC32) += bootx_init.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index ae54d7fe68f3..9360cdc408c1 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -147,6 +147,11 @@ static ssize_t core99_nvram_size(void)
static volatile unsigned char __iomem *nvram_addr;
static int nvram_mult;
+static ssize_t ppc32_nvram_size(void)
+{
+ return NVRAM_SIZE;
+}
+
static unsigned char direct_nvram_read_byte(int addr)
{
return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
@@ -590,21 +595,25 @@ int __init pmac_nvram_init(void)
nvram_mult = 1;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
} else if (nvram_naddrs == 1) {
nvram_data = ioremap(r1.start, s1);
nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
} else if (nvram_naddrs == 2) {
nvram_addr = ioremap(r1.start, s1);
nvram_data = ioremap(r2.start, s2);
ppc_md.nvram_read_val = indirect_nvram_read_byte;
ppc_md.nvram_write_val = indirect_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
#ifdef CONFIG_ADB_PMU
nvram_naddrs = -1;
ppc_md.nvram_read_val = pmu_nvram_read_byte;
ppc_md.nvram_write_val = pmu_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
#endif /* CONFIG_ADB_PMU */
} else {
printk(KERN_ERR "Incompatible type of NVRAM\n");
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 2e8221e20ee8..b7efcf336589 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -316,8 +316,7 @@ static void __init pmac_setup_arch(void)
find_via_pmu();
smu_init();
-#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \
- defined(CONFIG_PPC64)
+#if IS_ENABLED(CONFIG_NVRAM)
pmac_nvram_init();
#endif
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index f157e3d071f2..b36ddee17c87 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -68,7 +68,7 @@
long __init pmac_time_init(void)
{
s32 delta = 0;
-#ifdef CONFIG_NVRAM
+#if defined(CONFIG_NVRAM) && defined(CONFIG_PPC32)
int dst;
delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 69cedc1b3b8a..1136a38ff039 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -7,8 +7,6 @@
* 2 of the License, or (at your option) any later version.
*
* /dev/nvram driver for PPC64
- *
- * This perhaps should live in drivers/char
*/
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4f9f99057ff8..45f9decb9848 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -228,4 +228,6 @@ source "drivers/siox/Kconfig"
source "drivers/slimbus/Kconfig"
+source "drivers/interconnect/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e1ce029d28fd..bb15b9d0e793 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -186,3 +186,4 @@ obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
+obj-$(CONFIG_INTERCONNECT) += interconnect/
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4c190f8d1f4c..6fdf2abe4598 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU && !CPU_CACHE_VIVT
+ depends on MMU
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4d2b2ad1ee0e..8685882da64c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -329,6 +329,8 @@ struct binder_error {
* (invariant after initialized)
* @min_priority: minimum scheduling priority
* (invariant after initialized)
+ * @txn_security_ctx: require sender's security context
+ * (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
*
@@ -365,6 +367,7 @@ struct binder_node {
* invariant after initialization
*/
u8 accept_fds:1;
+ u8 txn_security_ctx:1;
u8 min_priority;
};
bool has_async_transaction;
@@ -615,6 +618,7 @@ struct binder_transaction {
long saved_priority;
kuid_t sender_euid;
struct list_head fd_fixups;
+ binder_uintptr_t security_ctx;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@@ -625,6 +629,26 @@ struct binder_transaction {
};
/**
+ * struct binder_object - union of flat binder object types
+ * @hdr: generic object header
+ * @fbo: binder object (nodes and refs)
+ * @fdo: file descriptor object
+ * @bbo: binder buffer pointer
+ * @fdao: file descriptor array
+ *
+ * Used for type-independent object copies
+ */
+struct binder_object {
+ union {
+ struct binder_object_header hdr;
+ struct flat_binder_object fbo;
+ struct binder_fd_object fdo;
+ struct binder_buffer_object bbo;
+ struct binder_fd_array_object fdao;
+ };
+};
+
+/**
* binder_proc_lock() - Acquire outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
@@ -1152,6 +1176,7 @@ static struct binder_node *binder_init_node_ilocked(
node->work.type = BINDER_WORK_NODE;
node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@@ -2012,26 +2037,33 @@ static void binder_cleanup_transaction(struct binder_transaction *t,
}
/**
- * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * binder_get_object() - gets object and checks for valid metadata
+ * @proc: binder_proc owning the buffer
* @buffer: binder_buffer that we're parsing.
- * @offset: offset in the buffer at which to validate an object.
+ * @offset: offset in the @buffer at which to validate an object.
+ * @object: struct binder_object to read into
*
* Return: If there's a valid metadata object at @offset in @buffer, the
- * size of that object. Otherwise, it returns zero.
+ * size of that object. Otherwise, it returns zero. The object
+ * is read into the struct binder_object pointed to by @object.
*/
-static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+static size_t binder_get_object(struct binder_proc *proc,
+ struct binder_buffer *buffer,
+ unsigned long offset,
+ struct binder_object *object)
{
- /* Check if we can read a header first */
+ size_t read_size;
struct binder_object_header *hdr;
size_t object_size = 0;
- if (buffer->data_size < sizeof(*hdr) ||
- offset > buffer->data_size - sizeof(*hdr) ||
- !IS_ALIGNED(offset, sizeof(u32)))
+ read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
+ if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+ binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size);
- /* Ok, now see if we can read a complete object. */
- hdr = (struct binder_object_header *)(buffer->data + offset);
+ /* Ok, now see if we read a complete object. */
+ hdr = &object->hdr;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER:
@@ -2060,10 +2092,13 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
/**
* binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @proc: binder_proc owning the buffer
* @b: binder_buffer containing the object
+ * @object: struct binder_object to read into
* @index: index in offset array at which the binder_buffer_object is
* located
- * @start: points to the start of the offset array
+ * @start_offset: offset to the start of the offset array
+ * @object_offsetp: offset of @object read from @b
* @num_valid: the number of valid offsets in the offset array
*
* Return: If @index is within the valid range of the offset array
@@ -2074,34 +2109,46 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
* Note that the offset found in index @index itself is not
* verified; this function assumes that @num_valid elements
* from @start were previously verified to have valid offsets.
+ * If @object_offsetp is non-NULL, then the offset within
+ * @b is written to it.
*/
-static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
- binder_size_t index,
- binder_size_t *start,
- binder_size_t num_valid)
+static struct binder_buffer_object *binder_validate_ptr(
+ struct binder_proc *proc,
+ struct binder_buffer *b,
+ struct binder_object *object,
+ binder_size_t index,
+ binder_size_t start_offset,
+ binder_size_t *object_offsetp,
+ binder_size_t num_valid)
{
- struct binder_buffer_object *buffer_obj;
- binder_size_t *offp;
+ size_t object_size;
+ binder_size_t object_offset;
+ unsigned long buffer_offset;
if (index >= num_valid)
return NULL;
- offp = start + index;
- buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
- if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+ buffer_offset = start_offset + sizeof(binder_size_t) * index;
+ binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ b, buffer_offset, sizeof(object_offset));
+ object_size = binder_get_object(proc, b, object_offset, object);
+ if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
+ if (object_offsetp)
+ *object_offsetp = object_offset;
- return buffer_obj;
+ return &object->bbo;
}
/**
* binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @proc: binder_proc owning the buffer
* @b: transaction buffer
- * @objects_start start of objects buffer
- * @buffer: binder_buffer_object in which to fix up
- * @offset: start offset in @buffer to fix up
- * @last_obj: last binder_buffer_object that we fixed up in
- * @last_min_offset: minimum fixup offset in @last_obj
+ * @objects_start_offset: offset to start of objects buffer
+ * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
+ * @fixup_offset: start offset in @buffer to fix up
+ * @last_obj_offset: offset to last binder_buffer_object that we fixed
+ * @last_min_offset: minimum fixup offset in object at @last_obj_offset
*
* Return: %true if a fixup in buffer @buffer at offset @offset is
* allowed.
@@ -2132,28 +2179,41 @@ static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
* C (parent = A, offset = 16)
* D (parent = B, offset = 0) // B is not A or any of A's parents
*/
-static bool binder_validate_fixup(struct binder_buffer *b,
- binder_size_t *objects_start,
- struct binder_buffer_object *buffer,
+static bool binder_validate_fixup(struct binder_proc *proc,
+ struct binder_buffer *b,
+ binder_size_t objects_start_offset,
+ binder_size_t buffer_obj_offset,
binder_size_t fixup_offset,
- struct binder_buffer_object *last_obj,
+ binder_size_t last_obj_offset,
binder_size_t last_min_offset)
{
- if (!last_obj) {
+ if (!last_obj_offset) {
/* Nothing to fix up in */
return false;
}
- while (last_obj != buffer) {
+ while (last_obj_offset != buffer_obj_offset) {
+ unsigned long buffer_offset;
+ struct binder_object last_object;
+ struct binder_buffer_object *last_bbo;
+ size_t object_size = binder_get_object(proc, b, last_obj_offset,
+ &last_object);
+ if (object_size != sizeof(*last_bbo))
+ return false;
+
+ last_bbo = &last_object.bbo;
/*
* Safe to retrieve the parent of last_obj, since it
* was already previously verified by the driver.
*/
- if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+ if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
return false;
- last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
- last_obj = (struct binder_buffer_object *)
- (b->data + *(objects_start + last_obj->parent));
+ last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
+ buffer_offset = objects_start_offset +
+ sizeof(binder_size_t) * last_bbo->parent;
+ binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
+ b, buffer_offset,
+ sizeof(last_obj_offset));
}
return (fixup_offset >= last_min_offset);
}
@@ -2218,35 +2278,42 @@ static void binder_deferred_fd_close(int fd)
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
- binder_size_t *failed_at)
+ binder_size_t failed_at,
+ bool is_failure)
{
- binder_size_t *offp, *off_start, *off_end;
int debug_id = buffer->debug_id;
+ binder_size_t off_start_offset, buffer_offset, off_end_offset;
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d buffer release %d, size %zd-%zd, failed at %pK\n",
+ "%d buffer release %d, size %zd-%zd, failed at %llx\n",
proc->pid, buffer->debug_id,
- buffer->data_size, buffer->offsets_size, failed_at);
+ buffer->data_size, buffer->offsets_size,
+ (unsigned long long)failed_at);
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- off_start = (binder_size_t *)(buffer->data +
- ALIGN(buffer->data_size, sizeof(void *)));
- if (failed_at)
- off_end = failed_at;
- else
- off_end = (void *)off_start + buffer->offsets_size;
- for (offp = off_start; offp < off_end; offp++) {
+ off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
+ off_end_offset = is_failure ? failed_at :
+ off_start_offset + buffer->offsets_size;
+ for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
- size_t object_size = binder_validate_object(buffer, *offp);
-
+ size_t object_size;
+ struct binder_object object;
+ binder_size_t object_offset;
+
+ binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ buffer, buffer_offset,
+ sizeof(object_offset));
+ object_size = binder_get_object(proc, buffer,
+ object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
- debug_id, (u64)*offp, buffer->data_size);
+ debug_id, (u64)object_offset, buffer->data_size);
continue;
}
- hdr = (struct binder_object_header *)(buffer->data + *offp);
+ hdr = &object.hdr;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -2309,10 +2376,11 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FDA: {
struct binder_fd_array_object *fda;
struct binder_buffer_object *parent;
- uintptr_t parent_buffer;
- u32 *fd_array;
+ struct binder_object ptr_object;
+ binder_size_t fda_offset;
size_t fd_index;
binder_size_t fd_buf_size;
+ binder_size_t num_valid;
if (proc->tsk != current->group_leader) {
/*
@@ -2323,23 +2391,19 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
continue;
}
+ num_valid = (buffer_offset - off_start_offset) /
+ sizeof(binder_size_t);
fda = to_binder_fd_array_object(hdr);
- parent = binder_validate_ptr(buffer, fda->parent,
- off_start,
- offp - off_start);
+ parent = binder_validate_ptr(proc, buffer, &ptr_object,
+ fda->parent,
+ off_start_offset,
+ NULL,
+ num_valid);
if (!parent) {
pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
- /*
- * Since the parent was already fixed up, convert it
- * back to kernel address space to access it
- */
- parent_buffer = parent->buffer -
- binder_alloc_get_user_buffer_offset(
- &proc->alloc);
-
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
pr_err("transaction release %d invalid number of fds (%lld)\n",
@@ -2353,9 +2417,29 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
debug_id, (u64)fda->num_fds);
continue;
}
- fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
- for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
- binder_deferred_fd_close(fd_array[fd_index]);
+ /*
+ * the source data for binder_buffer_object is visible
+ * to user-space and the @buffer element is the user
+ * pointer to the buffer_object containing the fd_array.
+ * Convert the address to an offset relative to
+ * the base of the transaction buffer.
+ */
+ fda_offset =
+ (parent->buffer - (uintptr_t)buffer->user_data) +
+ fda->parent_offset;
+ for (fd_index = 0; fd_index < fda->num_fds;
+ fd_index++) {
+ u32 fd;
+ binder_size_t offset = fda_offset +
+ fd_index * sizeof(fd);
+
+ binder_alloc_copy_from_buffer(&proc->alloc,
+ &fd,
+ buffer,
+ offset,
+ sizeof(fd));
+ binder_deferred_fd_close(fd);
+ }
} break;
default:
pr_err("transaction release %d bad object type %x\n",
@@ -2491,7 +2575,7 @@ done:
return ret;
}
-static int binder_translate_fd(u32 *fdp,
+static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
@@ -2502,7 +2586,6 @@ static int binder_translate_fd(u32 *fdp,
struct file *file;
int ret = 0;
bool target_allows_fd;
- int fd = *fdp;
if (in_reply_to)
target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2541,7 +2624,7 @@ static int binder_translate_fd(u32 *fdp,
goto err_alloc;
}
fixup->file = file;
- fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
+ fixup->offset = fd_offset;
trace_binder_transaction_fd_send(t, fd, fixup->offset);
list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
@@ -2562,8 +2645,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
struct binder_transaction *in_reply_to)
{
binder_size_t fdi, fd_buf_size;
- uintptr_t parent_buffer;
- u32 *fd_array;
+ binder_size_t fda_offset;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
@@ -2581,20 +2663,29 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
return -EINVAL;
}
/*
- * Since the parent was already fixed up, convert it
- * back to the kernel address space to access it
+ * the source data for binder_buffer_object is visible
+ * to user-space and the @buffer element is the user
+ * pointer to the buffer_object containing the fd_array.
+ * Convert the address to an offset relative to
+ * the base of the transaction buffer.
*/
- parent_buffer = parent->buffer -
- binder_alloc_get_user_buffer_offset(&target_proc->alloc);
- fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
- if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+ fda->parent_offset;
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
return -EINVAL;
}
for (fdi = 0; fdi < fda->num_fds; fdi++) {
- int ret = binder_translate_fd(&fd_array[fdi], t, thread,
- in_reply_to);
+ u32 fd;
+ int ret;
+ binder_size_t offset = fda_offset + fdi * sizeof(fd);
+
+ binder_alloc_copy_from_buffer(&target_proc->alloc,
+ &fd, t->buffer,
+ offset, sizeof(fd));
+ ret = binder_translate_fd(fd, offset, t, thread,
+ in_reply_to);
if (ret < 0)
return ret;
}
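
[Editorial note, not part of the patch: binder_transaction_buffer_release() and binder_translate_fd_array() above derive the fd array location with the same arithmetic. A minimal helper showing just that calculation, as it would read inside binder.c, using the field names this series introduces:]

	static inline binder_size_t example_fda_offset(struct binder_buffer *b,
						       struct binder_buffer_object *parent,
						       struct binder_fd_array_object *fda)
	{
		/* parent->buffer is the (already fixed up) user-space address of the
		 * parent object inside this transaction buffer; subtracting the
		 * buffer's user base gives an offset usable with
		 * binder_alloc_copy_from_buffer(), where fd i then sits at
		 * offset + i * sizeof(u32).
		 */
		return (parent->buffer - (uintptr_t)b->user_data) + fda->parent_offset;
	}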
@@ -2604,30 +2695,34 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
static int binder_fixup_parent(struct binder_transaction *t,
struct binder_thread *thread,
struct binder_buffer_object *bp,
- binder_size_t *off_start,
+ binder_size_t off_start_offset,
binder_size_t num_valid,
- struct binder_buffer_object *last_fixup_obj,
+ binder_size_t last_fixup_obj_off,
binder_size_t last_fixup_min_off)
{
struct binder_buffer_object *parent;
- u8 *parent_buffer;
struct binder_buffer *b = t->buffer;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_object object;
+ binder_size_t buffer_offset;
+ binder_size_t parent_offset;
if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
return 0;
- parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+ parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
+ off_start_offset, &parent_offset,
+ num_valid);
if (!parent) {
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return -EINVAL;
}
- if (!binder_validate_fixup(b, off_start,
- parent, bp->parent_offset,
- last_fixup_obj,
+ if (!binder_validate_fixup(target_proc, b, off_start_offset,
+ parent_offset, bp->parent_offset,
+ last_fixup_obj_off,
last_fixup_min_off)) {
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
@@ -2641,10 +2736,10 @@ static int binder_fixup_parent(struct binder_transaction *t,
proc->pid, thread->pid);
return -EINVAL;
}
- parent_buffer = (u8 *)((uintptr_t)parent->buffer -
- binder_alloc_get_user_buffer_offset(
- &target_proc->alloc));
- *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+ buffer_offset = bp->parent_offset +
+ (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+ binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
+ &bp->buffer, sizeof(bp->buffer));
return 0;
}
@@ -2763,9 +2858,10 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_transaction *t;
struct binder_work *w;
struct binder_work *tcomplete;
- binder_size_t *offp, *off_end, *off_start;
+ binder_size_t buffer_offset = 0;
+ binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
- u8 *sg_bufp, *sg_buf_end;
+ binder_size_t sg_buf_offset, sg_buf_end_offset;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -2774,10 +2870,12 @@ static void binder_transaction(struct binder_proc *proc,
uint32_t return_error = 0;
uint32_t return_error_param = 0;
uint32_t return_error_line = 0;
- struct binder_buffer_object *last_fixup_obj = NULL;
+ binder_size_t last_fixup_obj_off = 0;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
+ char *secctx = NULL;
+ u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3020,6 +3118,20 @@ static void binder_transaction(struct binder_proc *proc,
t->flags = tr->flags;
t->priority = task_nice(current);
+ if (target_node && target_node->txn_security_ctx) {
+ u32 secid;
+
+ security_task_getsecid(proc->tsk, &secid);
+ ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_get_secctx_failed;
+ }
+ extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+ }
+
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3036,16 +3148,30 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
+ if (secctx) {
+ size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+ ALIGN(tr->offsets_size, sizeof(void *)) +
+ ALIGN(extra_buffers_size, sizeof(void *)) -
+ ALIGN(secctx_sz, sizeof(u64));
+
+ t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, buf_offset,
+ secctx, secctx_sz);
+ security_release_secctx(secctx, secctx_sz);
+ secctx = NULL;
+ }
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- off_start = (binder_size_t *)(t->buffer->data +
- ALIGN(tr->data_size, sizeof(void *)));
- offp = off_start;
- if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
- tr->data.ptr.buffer, tr->data_size)) {
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, 0,
+ (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer,
+ tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
@@ -3053,8 +3179,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, (const void __user *)(uintptr_t)
- tr->data.ptr.offsets, tr->offsets_size)) {
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer,
+ ALIGN(tr->data_size, sizeof(void *)),
+ (const void __user *)
+ (uintptr_t)tr->data.ptr.offsets,
+ tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
@@ -3079,17 +3210,30 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- off_end = (void *)off_start + tr->offsets_size;
- sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
- sg_buf_end = sg_bufp + extra_buffers_size;
+ off_start_offset = ALIGN(tr->data_size, sizeof(void *));
+ buffer_offset = off_start_offset;
+ off_end_offset = off_start_offset + tr->offsets_size;
+ sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
+ sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
off_min = 0;
- for (; offp < off_end; offp++) {
+ for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
- size_t object_size = binder_validate_object(t->buffer, *offp);
-
- if (object_size == 0 || *offp < off_min) {
+ size_t object_size;
+ struct binder_object object;
+ binder_size_t object_offset;
+
+ binder_alloc_copy_from_buffer(&target_proc->alloc,
+ &object_offset,
+ t->buffer,
+ buffer_offset,
+ sizeof(object_offset));
+ object_size = binder_get_object(target_proc, t->buffer,
+ object_offset, &object);
+ if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
- proc->pid, thread->pid, (u64)*offp,
+ proc->pid, thread->pid,
+ (u64)object_offset,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
@@ -3098,8 +3242,8 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_offset;
}
- hdr = (struct binder_object_header *)(t->buffer->data + *offp);
- off_min = *offp + object_size;
+ hdr = &object.hdr;
+ off_min = object_offset + object_size;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -3113,6 +3257,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ fp, sizeof(*fp));
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
@@ -3126,12 +3273,17 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ fp, sizeof(*fp));
} break;
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
- int ret = binder_translate_fd(&fp->fd, t, thread,
- in_reply_to);
+ binder_size_t fd_offset = object_offset +
+ (uintptr_t)&fp->fd - (uintptr_t)fp;
+ int ret = binder_translate_fd(fp->fd, fd_offset, t,
+ thread, in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
@@ -3140,14 +3292,23 @@ static void binder_transaction(struct binder_proc *proc,
goto err_translate_failed;
}
fp->pad_binder = 0;
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ fp, sizeof(*fp));
} break;
case BINDER_TYPE_FDA: {
+ struct binder_object ptr_object;
+ binder_size_t parent_offset;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
+ size_t num_valid = (buffer_offset - off_start_offset) /
+ sizeof(binder_size_t);
struct binder_buffer_object *parent =
- binder_validate_ptr(t->buffer, fda->parent,
- off_start,
- offp - off_start);
+ binder_validate_ptr(target_proc, t->buffer,
+ &ptr_object, fda->parent,
+ off_start_offset,
+ &parent_offset,
+ num_valid);
if (!parent) {
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
@@ -3156,9 +3317,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_parent;
}
- if (!binder_validate_fixup(t->buffer, off_start,
- parent, fda->parent_offset,
- last_fixup_obj,
+ if (!binder_validate_fixup(target_proc, t->buffer,
+ off_start_offset,
+ parent_offset,
+ fda->parent_offset,
+ last_fixup_obj_off,
last_fixup_min_off)) {
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
@@ -3175,14 +3338,15 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
- last_fixup_obj = parent;
+ last_fixup_obj_off = parent_offset;
last_fixup_min_off =
fda->parent_offset + sizeof(u32) * fda->num_fds;
} break;
case BINDER_TYPE_PTR: {
struct binder_buffer_object *bp =
to_binder_buffer_object(hdr);
- size_t buf_left = sg_buf_end - sg_bufp;
+ size_t buf_left = sg_buf_end_offset - sg_buf_offset;
+ size_t num_valid;
if (bp->length > buf_left) {
binder_user_error("%d:%d got transaction with too large buffer\n",
@@ -3192,9 +3356,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- if (copy_from_user(sg_bufp,
- (const void __user *)(uintptr_t)
- bp->buffer, bp->length)) {
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer,
+ sg_buf_offset,
+ (const void __user *)
+ (uintptr_t)bp->buffer,
+ bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error_param = -EFAULT;
@@ -3203,14 +3371,16 @@ static void binder_transaction(struct binder_proc *proc,
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
- bp->buffer = (uintptr_t)sg_bufp +
- binder_alloc_get_user_buffer_offset(
- &target_proc->alloc);
- sg_bufp += ALIGN(bp->length, sizeof(u64));
-
- ret = binder_fixup_parent(t, thread, bp, off_start,
- offp - off_start,
- last_fixup_obj,
+ bp->buffer = (uintptr_t)
+ t->buffer->user_data + sg_buf_offset;
+ sg_buf_offset += ALIGN(bp->length, sizeof(u64));
+
+ num_valid = (buffer_offset - off_start_offset) *
+ sizeof(binder_size_t);
+ ret = binder_fixup_parent(t, thread, bp,
+ off_start_offset,
+ num_valid,
+ last_fixup_obj_off,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
@@ -3218,7 +3388,10 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
- last_fixup_obj = bp;
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ bp, sizeof(*bp));
+ last_fixup_obj_off = object_offset;
last_fixup_min_off = 0;
} break;
default:
@@ -3298,13 +3471,17 @@ err_bad_parent:
err_copy_data_failed:
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
- binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ binder_transaction_buffer_release(target_proc, t->buffer,
+ buffer_offset, true);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
+ if (secctx)
+ security_release_secctx(secctx, secctx_sz);
+err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
@@ -3396,7 +3573,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_transaction_buffer_release(proc, buffer, 0, false);
binder_alloc_free_buf(&proc->alloc, buffer);
}
@@ -3915,6 +4092,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
/**
* binder_apply_fd_fixups() - finish fd translation
+ * @proc: binder_proc associated with @t->buffer
* @t: binder transaction with list of fd fixups
*
* Now that we are in the context of the transaction target
@@ -3926,14 +4104,14 @@ static int binder_wait_for_work(struct binder_thread *thread,
* fput'ing files that have not been processed and ksys_close'ing
* any fds that have already been allocated.
*/
-static int binder_apply_fd_fixups(struct binder_transaction *t)
+static int binder_apply_fd_fixups(struct binder_proc *proc,
+ struct binder_transaction *t)
{
struct binder_txn_fd_fixup *fixup, *tmp;
int ret = 0;
list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
int fd = get_unused_fd_flags(O_CLOEXEC);
- u32 *fdp;
if (fd < 0) {
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -3948,33 +4126,20 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
trace_binder_transaction_fd_recv(t, fd, fixup->offset);
fd_install(fd, fixup->file);
fixup->file = NULL;
- fdp = (u32 *)(t->buffer->data + fixup->offset);
- /*
- * This store can cause problems for CPUs with a
- * VIVT cache (eg ARMv5) since the cache cannot
- * detect virtual aliases to the same physical cacheline.
- * To support VIVT, this address and the user-space VA
- * would both need to be flushed. Since this kernel
- * VA is not constructed via page_to_virt(), we can't
- * use flush_dcache_page() on it, so we'd have to use
- * an internal function. If devices with VIVT ever
- * need to run Android, we'll either need to go back
- * to patching the translated fd from the sender side
- * (using the non-standard kernel functions), or rework
- * how the kernel uses the buffer to use page_to_virt()
- * addresses instead of allocating in our own vm area.
- *
- * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
- */
- *fdp = fd;
+ binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
+ fixup->offset, &fd,
+ sizeof(u32));
}
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
if (fixup->file) {
fput(fixup->file);
} else if (ret) {
- u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
+ u32 fd;
- binder_deferred_fd_close(*fdp);
+ binder_alloc_copy_from_buffer(&proc->alloc, &fd,
+ t->buffer, fixup->offset,
+ sizeof(fd));
+ binder_deferred_fd_close(fd);
}
list_del(&fixup->fixup_entry);
kfree(fixup);
@@ -4036,11 +4201,13 @@ retry:
while (1) {
uint32_t cmd;
- struct binder_transaction_data tr;
+ struct binder_transaction_data_secctx tr;
+ struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
+ size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4240,8 +4407,8 @@ retry:
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
- tr.target.ptr = target_node->ptr;
- tr.cookie = target_node->cookie;
+ trd->target.ptr = target_node->ptr;
+ trd->cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
@@ -4251,25 +4418,26 @@ retry:
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = 0;
- tr.cookie = 0;
+ trd->target.ptr = 0;
+ trd->cookie = 0;
cmd = BR_REPLY;
}
- tr.code = t->code;
- tr.flags = t->flags;
- tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+ trd->code = t->code;
+ trd->flags = t->flags;
+ trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
- tr.sender_pid = task_tgid_nr_ns(sender,
- task_active_pid_ns(current));
+ trd->sender_pid =
+ task_tgid_nr_ns(sender,
+ task_active_pid_ns(current));
} else {
- tr.sender_pid = 0;
+ trd->sender_pid = 0;
}
- ret = binder_apply_fd_fixups(t);
+ ret = binder_apply_fd_fixups(proc, t);
if (ret) {
struct binder_buffer *buffer = t->buffer;
bool oneway = !!(t->flags & TF_ONE_WAY);
@@ -4297,15 +4465,18 @@ retry:
}
continue;
}
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)
- ((uintptr_t)t->buffer->data +
- binder_alloc_get_user_buffer_offset(&proc->alloc));
- tr.data.ptr.offsets = tr.data.ptr.buffer +
+ trd->data_size = t->buffer->data_size;
+ trd->offsets_size = t->buffer->offsets_size;
+ trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
+ trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
+ tr.secctx = t->security_ctx;
+ if (t->security_ctx) {
+ cmd = BR_TRANSACTION_SEC_CTX;
+ trsize = sizeof(tr);
+ }
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4316,7 +4487,7 @@ retry:
return -EFAULT;
}
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (copy_to_user(ptr, &tr, trsize)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4325,7 +4496,7 @@ retry:
return -EFAULT;
}
- ptr += sizeof(tr);
+ ptr += trsize;
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
@@ -4333,16 +4504,18 @@ retry:
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
- "BR_REPLY",
+ (cmd == BR_TRANSACTION_SEC_CTX) ?
+ "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+ (u64)trd->data.ptr.buffer,
+ (u64)trd->data.ptr.offsets);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
- if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
@@ -4690,7 +4863,8 @@ out:
return ret;
}
-static int binder_ioctl_set_ctx_mgr(struct file *filp)
+static int binder_ioctl_set_ctx_mgr(struct file *filp,
+ struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
@@ -4719,7 +4893,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- new_node = binder_new_node(proc, NULL);
+ new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
@@ -4842,8 +5016,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
+ case BINDER_SET_CONTEXT_MGR_EXT: {
+ struct flat_binder_object fbo;
+
+ if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
+ if (ret)
+ goto err;
+ break;
+ }
case BINDER_SET_CONTEXT_MGR:
- ret = binder_ioctl_set_ctx_mgr(filp);
+ ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
@@ -5319,7 +5505,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %pK\n",
buffer->data_size, buffer->offsets_size,
- buffer->data);
+ buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 022cd80e80cc..6389467670a0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -29,6 +29,8 @@
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
+#include <linux/uaccess.h>
+#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -67,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return (u8 *)alloc->buffer +
- alloc->buffer_size - (u8 *)buffer->data;
- return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+ return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -119,9 +120,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer->data < buffer->data)
+ if (new_buffer->user_data < buffer->user_data)
p = &parent->rb_left;
- else if (new_buffer->data > buffer->data)
+ else if (new_buffer->user_data > buffer->user_data)
p = &parent->rb_right;
else
BUG();
@@ -136,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- void *kern_ptr;
+ void __user *uptr;
- kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+ uptr = (void __user *)user_ptr;
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer->data)
+ if (uptr < buffer->user_data)
n = n->rb_left;
- else if (kern_ptr > buffer->data)
+ else if (uptr > buffer->user_data)
n = n->rb_right;
else {
/*
@@ -186,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end)
+ void __user *start, void __user *end)
{
- void *page_addr;
+ void __user *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
struct vm_area_struct *vma = NULL;
@@ -263,18 +264,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
page->alloc = alloc;
INIT_LIST_HEAD(&page->lru);
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL,
- &page->page_ptr);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
- alloc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + alloc->user_buffer_offset;
+ user_page_addr = (uintptr_t)page_addr;
ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -312,8 +302,6 @@ free_range:
continue;
err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
__free_page(page->page_ptr);
page->page_ptr = NULL;
err_alloc_page_failed:
@@ -368,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
+ void __user *has_page_addr;
+ void __user *end_page_addr;
size_t size, data_offsets_size;
int ret;
@@ -467,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
"%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
alloc->pid, size, buffer, buffer_size);
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ has_page_addr = (void __user *)
+ (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
- ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
+ ret = binder_update_page_range(alloc, 1, (void __user *)
+ PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
if (ret)
return ERR_PTR(ret);
@@ -488,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
__func__, alloc->pid);
goto err_alloc_buf_struct_failed;
}
- new_buffer->data = (u8 *)buffer->data + size;
+ new_buffer->user_data = (u8 __user *)buffer->user_data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
@@ -514,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
return buffer;
err_alloc_buf_struct_failed:
- binder_update_page_range(alloc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ binder_update_page_range(alloc, 0, (void __user *)
+ PAGE_ALIGN((uintptr_t)buffer->user_data),
end_page_addr);
return ERR_PTR(-ENOMEM);
}
@@ -550,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
return buffer;
}
-static void *buffer_start_page(struct binder_buffer *buffer)
+static void __user *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+ return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+ return (void __user *)
+ (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -572,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer->data, prev->data);
+ alloc->pid, buffer->user_data,
+ prev->user_data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -582,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid,
- buffer->data,
- next->data);
+ buffer->user_data,
+ next->user_data);
}
}
- if (PAGE_ALIGNED(buffer->data)) {
+ if (PAGE_ALIGNED(buffer->user_data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer start %pK is page aligned\n",
- alloc->pid, buffer->data);
+ alloc->pid, buffer->user_data);
to_free = false;
}
if (to_free) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
- alloc->pid, buffer->data,
- prev->data, next ? next->data : NULL);
+ alloc->pid, buffer->user_data,
+ prev->user_data,
+ next ? next->user_data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE);
}
@@ -624,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->data < alloc->buffer);
- BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->buffer);
+ BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -636,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
}
binder_update_page_range(alloc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
+ (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
+ (void __user *)(((uintptr_t)
+ buffer->user_data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -693,7 +685,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
int ret;
- struct vm_struct *area;
const char *failure_string;
struct binder_buffer *buffer;
@@ -704,28 +695,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_already_mapped;
}
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- alloc->buffer = area->addr;
- alloc->user_buffer_offset =
- vma->vm_start - (uintptr_t)alloc->buffer;
+ alloc->buffer = (void __user *)vma->vm_start;
mutex_unlock(&binder_alloc_mmap_lock);
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR(
- (vma->vm_start ^ (uint32_t)alloc->buffer))) {
- pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
- __func__, alloc->pid, vma->vm_start,
- vma->vm_end, alloc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
@@ -743,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->data = alloc->buffer;
+ buffer->user_data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -758,9 +730,7 @@ err_alloc_buf_struct_failed:
alloc->pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_alloc_mmap_lock);
- vfree(alloc->buffer);
alloc->buffer = NULL;
-err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -806,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
+ void __user *page_addr;
bool on_lru;
if (!alloc->pages[i].page_ptr)
@@ -819,12 +789,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
"%s: %d: page %d at %pK %s\n",
__func__, alloc->pid, i, page_addr,
on_lru ? "on lru" : "active");
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
__free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
- vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
if (alloc->vma_vm_mm)
@@ -839,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
struct binder_buffer *buffer)
{
seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
+ prefix, buffer->debug_id, buffer->user_data,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
@@ -964,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
mm = alloc->vma_vm_mm;
- if (!down_write_trylock(&mm->mmap_sem))
+ if (!down_read_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
@@ -974,19 +942,16 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range(vma,
- page_addr + alloc->user_buffer_offset,
- PAGE_SIZE);
+ zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
mmput(mm);
}
trace_binder_unmap_kernel_start(alloc, index);
- unmap_kernel_range(page_addr, PAGE_SIZE);
__free_page(page->page_ptr);
page->page_ptr = NULL;
@@ -1053,3 +1018,173 @@ int binder_alloc_shrinker_init(void)
}
return ret;
}
+
+/**
+ * check_buffer() - verify that buffer/offset is safe to access
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @offset: offset into @buffer data
+ * @bytes: bytes to access from offset
+ *
+ * Check that the @offset/@bytes are within the size of the given
+ * @buffer and that the buffer is currently active and not freeable.
+ * Offsets must also be multiples of sizeof(u32). The kernel is
+ * allowed to touch the buffer in two cases:
+ *
+ * 1) when the buffer is being created:
+ * (buffer->free == 0 && buffer->allow_user_free == 0)
+ * 2) when the buffer is being torn down:
+ * (buffer->free == 0 && buffer->transaction == NULL).
+ *
+ * Return: true if the buffer is safe to access
+ */
+static inline bool check_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t offset, size_t bytes)
+{
+ size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ return buffer_size >= bytes &&
+ offset <= buffer_size - bytes &&
+ IS_ALIGNED(offset, sizeof(u32)) &&
+ !buffer->free &&
+ (!buffer->allow_user_free || !buffer->transaction);
+}
+
+/**
+ * binder_alloc_get_page() - get kernel pointer for given buffer offset
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @pgoffp: address to copy final page offset to
+ *
+ * Lookup the struct page corresponding to the address
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
+ * NULL, the byte-offset into the page is written there.
+ *
+ * The caller is responsible for ensuring that the offset points
+ * to a valid address within the @buffer and that @buffer is
+ * not freeable by the user. Since it can't be freed, we are
+ * guaranteed that the corresponding elements of @alloc->pages[]
+ * cannot change.
+ *
+ * Return: struct page
+ */
+static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ pgoff_t *pgoffp)
+{
+ binder_size_t buffer_space_offset = buffer_offset +
+ (buffer->user_data - alloc->buffer);
+ pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
+ size_t index = buffer_space_offset >> PAGE_SHIFT;
+ struct binder_lru_page *lru_page;
+
+ lru_page = &alloc->pages[index];
+ *pgoffp = pgoff;
+ return lru_page->page_ptr;
+}
+
+/**
+ * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @from: userspace pointer to source buffer
+ * @bytes: bytes to copy
+ *
+ * Copy bytes from source userspace to target buffer.
+ *
+ * Return: bytes remaining to be copied
+ */
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ const void __user *from,
+ size_t bytes)
+{
+ if (!check_buffer(alloc, buffer, buffer_offset, bytes))
+ return bytes;
+
+ while (bytes) {
+ unsigned long size;
+ unsigned long ret;
+ struct page *page;
+ pgoff_t pgoff;
+ void *kptr;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ kptr = kmap(page) + pgoff;
+ ret = copy_from_user(kptr, from, size);
+ kunmap(page);
+ if (ret)
+ return bytes - size + ret;
+ bytes -= size;
+ from += size;
+ buffer_offset += size;
+ }
+ return 0;
+}
+
+static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+ bool to_buffer,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *ptr,
+ size_t bytes)
+{
+ /* All copies must be 32-bit aligned and 32-bit size */
+ BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+
+ while (bytes) {
+ unsigned long size;
+ struct page *page;
+ pgoff_t pgoff;
+ void *tmpptr;
+ void *base_ptr;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ base_ptr = kmap_atomic(page);
+ tmpptr = base_ptr + pgoff;
+ if (to_buffer)
+ memcpy(tmpptr, ptr, size);
+ else
+ memcpy(ptr, tmpptr, size);
+ /*
+ * kunmap_atomic() takes care of flushing the cache
+ * if this device has VIVT cache arch
+ */
+ kunmap_atomic(base_ptr);
+ bytes -= size;
+ pgoff = 0;
+ ptr = ptr + size;
+ buffer_offset += size;
+ }
+}
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *src,
+ size_t bytes)
+{
+ binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+ src, bytes);
+}
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+ void *dest,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ size_t bytes)
+{
+ binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+ dest, bytes);
+}
+
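
A brief, hedged sketch of how a caller might use the new copy helpers instead of dereferencing buffer->data directly; the function names and offsets are illustrative only, and only the three binder_alloc_copy_*() helpers come from this patch (in this version they return void for the kernel-side copies and BUG on out-of-range or misaligned offsets, per check_buffer()):

#include <linux/errno.h>
#include <linux/types.h>
#include "binder_alloc.h"	/* driver-internal header added above */

/* Read a u32 out of, and write a patched value back into, a transaction
 * buffer. @offset must be 32-bit aligned and within the buffer.
 */
static void example_patch_u32(struct binder_alloc *alloc,
			      struct binder_buffer *buffer,
			      binder_size_t offset, u32 new_val)
{
	u32 old_val;

	binder_alloc_copy_from_buffer(alloc, &old_val, buffer, offset,
				      sizeof(old_val));
	/* ... translate old_val as needed ... */
	binder_alloc_copy_to_buffer(alloc, buffer, offset, &new_val,
				    sizeof(new_val));
}

/* The userspace copy reports the number of bytes left uncopied on failure. */
static int example_copy_user(struct binder_alloc *alloc,
			     struct binder_buffer *buffer,
			     binder_size_t offset,
			     const void __user *from, size_t bytes)
{
	if (binder_alloc_copy_user_to_buffer(alloc, buffer, offset,
					     from, bytes))
		return -EFAULT;
	return 0;
}
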
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index c0aadbbf7f19..b60d161b7a7a 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
+#include <uapi/linux/android/binder.h>
extern struct list_lru binder_alloc_lru;
struct binder_transaction;
@@ -39,7 +40,7 @@ struct binder_transaction;
* @data_size: size of @transaction data
* @offsets_size: size of array of offsets
* @extra_buffers_size: size of space for other objects (like sg lists)
- * @data: pointer to base of buffer space
+ * @user_data: user pointer to base of buffer space
*
* Bookkeeping structure for binder transaction buffers
*/
@@ -58,7 +59,7 @@ struct binder_buffer {
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- void *data;
+ void __user *user_data;
};
/**
@@ -81,7 +82,6 @@ struct binder_lru_page {
* (invariant after init)
* @vma_vm_mm: copy of vma->vm_mm (invarient after mmap)
* @buffer: base of per-proc address space mapped via mmap
- * @user_buffer_offset: offset between user and kernel VAs for buffer
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
@@ -102,8 +102,7 @@ struct binder_alloc {
struct mutex mutex;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
- void *buffer;
- ptrdiff_t user_buffer_offset;
+ void __user *buffer;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
@@ -162,26 +161,24 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
return free_async_space;
}
-/**
- * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
- * @alloc: binder_alloc for this proc
- *
- * Return: the offset between kernel and user-space addresses to use for
- * virtual address conversion
- */
-static inline ptrdiff_t
-binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
-{
- /*
- * user_buffer_offset is constant if vma is set and
- * undefined if vma is not set. It is possible to
- * get here with !alloc->vma if the target process
- * is dying while a transaction is being initiated.
- * Returning the old value is ok in this case and
- * the transaction will fail.
- */
- return alloc->user_buffer_offset;
-}
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ const void __user *from,
+ size_t bytes);
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *src,
+ size_t bytes);
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+ void *dest,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ size_t bytes);
#endif /* _LINUX_BINDER_ALLOC_H */
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 8bd7bcef967d..b72708918b06 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -102,11 +102,12 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- void *page_addr, *end;
+ void __user *page_addr;
+ void __user *end;
int page_index;
- end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
- page_addr = buffer->data;
+ end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+ page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
if (!alloc->pages[page_index].page_ptr ||
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 14de7ac57a34..83cc254d2335 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -293,7 +293,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TRACE_EVENT(binder_update_page_range,
TP_PROTO(struct binder_alloc *alloc, bool allocate,
- void *start, void *end),
+ void __user *start, void __user *end),
TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
diff --git a/drivers/base/component.c b/drivers/base/component.c
index ddcea8739c12..7dbc41cccd58 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -16,11 +16,38 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
+/**
+ * DOC: overview
+ *
+ * The component helper allows drivers to collect a pile of sub-devices,
+ * including their bound drivers, into an aggregate driver. Various subsystems
+ * already provide functions to get hold of such components, e.g.
+ * of_clk_get_by_name(). The component helper can be used when such a
+ * subsystem-specific way to find a device is not available: The component
+ * helper fills the niche of aggregate drivers for specific hardware, where
+ * further standardization into a subsystem would not be practical. The common
+ * example is when a logical device (e.g. a DRM display driver) is spread around
+ * the SoC on various components (scanout engines, blending blocks, transcoders
+ * for various outputs and so on).
+ *
+ * The component helper also doesn't solve runtime dependencies, e.g. for system
+ * suspend and resume operations. See also :ref:`device links<device_link>`.
+ *
+ * Components are registered using component_add() and unregistered with
+ * component_del(), usually from the driver's probe and disconnect functions.
+ *
+ * Aggregate drivers first assemble a component match list of what they need
+ * using component_match_add(). This is then registered as an aggregate driver
+ * using component_master_add_with_match(), and unregistered using
+ * component_master_del().
+ */
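
To make the flow described above concrete, here is a minimal usage sketch of the helper; the driver names, match criterion and platform_device wiring are hypothetical and not part of this patch, only the component_*() calls are taken from the API documented here:

#include <linux/component.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sub-device side: bind/unbind run when the aggregate driver assembles. */
static int my_sub_bind(struct device *dev, struct device *master, void *data)
{
	/* wire this component into the aggregate state passed via @data */
	return 0;
}

static void my_sub_unbind(struct device *dev, struct device *master, void *data)
{
}

static const struct component_ops my_sub_ops = {
	.bind	= my_sub_bind,
	.unbind	= my_sub_unbind,
};

static int my_sub_probe(struct platform_device *pdev)
{
	/* undone with component_del() in the remove path */
	return component_add(&pdev->dev, &my_sub_ops);
}

/* Aggregate side: collect the components, then assemble in .bind. */
static int my_compare(struct device *dev, void *data)
{
	return dev->parent == data;	/* any suitable match criterion */
}

static int my_master_bind(struct device *dev)
{
	return component_bind_all(dev, NULL);	/* calls each component's .bind */
}

static void my_master_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}

static const struct component_master_ops my_master_ops = {
	.bind	= my_master_bind,
	.unbind	= my_master_unbind,
};

static int my_master_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;

	component_match_add_release(&pdev->dev, &match, NULL, my_compare,
				    pdev->dev.parent);
	if (IS_ERR(match))	/* match list allocation failed */
		return PTR_ERR(match);

	return component_master_add_with_match(&pdev->dev, &my_master_ops,
					       match);
}
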
+
struct component;
struct component_match_array {
void *data;
int (*compare)(struct device *, void *);
+ int (*compare_typed)(struct device *, int, void *);
void (*release)(struct device *, void *);
struct component *component;
bool duplicate;
@@ -48,6 +75,7 @@ struct component {
bool bound;
const struct component_ops *ops;
+ int subcomponent;
struct device *dev;
};
@@ -132,7 +160,7 @@ static struct master *__master_find(struct device *dev,
}
static struct component *find_component(struct master *master,
- int (*compare)(struct device *, void *), void *compare_data)
+ struct component_match_array *mc)
{
struct component *c;
@@ -140,7 +168,11 @@ static struct component *find_component(struct master *master,
if (c->master && c->master != master)
continue;
- if (compare(c->dev, compare_data))
+ if (mc->compare && mc->compare(c->dev, mc->data))
+ return c;
+
+ if (mc->compare_typed &&
+ mc->compare_typed(c->dev, c->subcomponent, mc->data))
return c;
}
@@ -166,7 +198,7 @@ static int find_components(struct master *master)
if (match->compare[i].component)
continue;
- c = find_component(master, mc->compare, mc->data);
+ c = find_component(master, mc);
if (!c) {
ret = -ENXIO;
break;
@@ -301,15 +333,12 @@ static int component_match_realloc(struct device *dev,
return 0;
}
-/*
- * Add a component to be matched, with a release function.
- *
- * The match array is first created or extended if necessary.
- */
-void component_match_add_release(struct device *master,
+static void __component_match_add(struct device *master,
struct component_match **matchptr,
void (*release)(struct device *, void *),
- int (*compare)(struct device *, void *), void *compare_data)
+ int (*compare)(struct device *, void *),
+ int (*compare_typed)(struct device *, int, void *),
+ void *compare_data)
{
struct component_match *match = *matchptr;
@@ -341,13 +370,69 @@ void component_match_add_release(struct device *master,
}
match->compare[match->num].compare = compare;
+ match->compare[match->num].compare_typed = compare_typed;
match->compare[match->num].release = release;
match->compare[match->num].data = compare_data;
match->compare[match->num].component = NULL;
match->num++;
}
+
+/**
+ * component_match_add_release - add a component match with release callback
+ * @master: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @release: release function for @compare_data
+ * @compare: compare function to match against all components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @master
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions, whereupon @release will be called to free any references held by
+ * @compare_data, e.g. when @compare_data is a &device_node that must be
+ * released with of_node_put().
+ *
+ * See also component_match_add() and component_match_add_typed().
+ */
+void component_match_add_release(struct device *master,
+ struct component_match **matchptr,
+ void (*release)(struct device *, void *),
+ int (*compare)(struct device *, void *), void *compare_data)
+{
+ __component_match_add(master, matchptr, release, compare, NULL,
+ compare_data);
+}
EXPORT_SYMBOL(component_match_add_release);
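
The release callback matters when @compare_data carries a reference, e.g. a device_node as the kernel-doc above mentions. A hedged sketch of that pattern follows; the compare/release helpers are hypothetical, only component_match_add_release() itself comes from this file:

#include <linux/component.h>
#include <linux/device.h>
#include <linux/of.h>

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static void release_of(struct device *dev, void *data)
{
	of_node_put(data);	/* drop the reference taken when the match was added */
}

/* While building the match list, e.g. once per port/endpoint node: */
static void add_of_match(struct device *master, struct component_match **match,
			 struct device_node *np)
{
	component_match_add_release(master, match, release_of, compare_of,
				    of_node_get(np));
}
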
+/**
+ * component_match_add_typed - add a component match for a typed component
+ * @master: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @compare_typed: compare function to match against all typed components
+ * @compare_data: opaque pointer passed to the @compare_typed function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @master
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add_typed().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions.
+ *
+ * See also component_match_add() and component_match_add_release().
+ */
+void component_match_add_typed(struct device *master,
+ struct component_match **matchptr,
+ int (*compare_typed)(struct device *, int, void *), void *compare_data)
+{
+ __component_match_add(master, matchptr, NULL, NULL, compare_typed,
+ compare_data);
+}
+EXPORT_SYMBOL(component_match_add_typed);
+
static void free_master(struct master *master)
{
struct component_match *match = master->match;
@@ -367,6 +452,18 @@ static void free_master(struct master *master)
kfree(master);
}
+/**
+ * component_master_add_with_match - register an aggregate driver
+ * @dev: device with the aggregate driver
+ * @ops: callbacks for the aggregate driver
+ * @match: component match list for the aggregate driver
+ *
+ * Registers a new aggregate driver consisting of the components added to @match
+ * by calling one of the component_match_add() functions. Once all components in
+ * @match are available, it will be assembled by calling
+ * &component_master_ops.bind from @ops. Must be unregistered by calling
+ * component_master_del().
+ */
int component_master_add_with_match(struct device *dev,
const struct component_master_ops *ops,
struct component_match *match)
@@ -403,6 +500,15 @@ int component_master_add_with_match(struct device *dev,
}
EXPORT_SYMBOL_GPL(component_master_add_with_match);
+/**
+ * component_master_del - unregister an aggregate driver
+ * @dev: device with the aggregate driver
+ * @ops: callbacks for the aggregate driver
+ *
+ * Unregisters an aggregate driver registered with
+ * component_master_add_with_match(). If necessary the aggregate driver is first
+ * disassembled by calling &component_master_ops.unbind from @ops.
+ */
void component_master_del(struct device *dev,
const struct component_master_ops *ops)
{
@@ -430,6 +536,15 @@ static void component_unbind(struct component *component,
devres_release_group(component->dev, component);
}
+/**
+ * component_unbind_all - unbind all components of an aggregate driver
+ * @master_dev: device with the aggregate driver
+ * @data: opaque pointer, passed to all components
+ *
+ * Unbinds all components of the aggregate @dev by passing @data to their
+ * &component_ops.unbind functions. Should be called from
+ * &component_master_ops.unbind.
+ */
void component_unbind_all(struct device *master_dev, void *data)
{
struct master *master;
@@ -503,6 +618,15 @@ static int component_bind(struct component *component, struct master *master,
return ret;
}
+/**
+ * component_bind_all - bind all components of an aggregate driver
+ * @master_dev: device with the aggregate driver
+ * @data: opaque pointer, passed to all components
+ *
+ * Binds all components of the aggregate @dev by passing @data to their
+ * &component_ops.bind functions. Should be called from
+ * &component_master_ops.bind.
+ */
int component_bind_all(struct device *master_dev, void *data)
{
struct master *master;
@@ -537,7 +661,8 @@ int component_bind_all(struct device *master_dev, void *data)
}
EXPORT_SYMBOL_GPL(component_bind_all);
-int component_add(struct device *dev, const struct component_ops *ops)
+static int __component_add(struct device *dev, const struct component_ops *ops,
+ int subcomponent)
{
struct component *component;
int ret;
@@ -548,6 +673,7 @@ int component_add(struct device *dev, const struct component_ops *ops)
component->ops = ops;
component->dev = dev;
+ component->subcomponent = subcomponent;
dev_dbg(dev, "adding component (ops %ps)\n", ops);
@@ -566,8 +692,66 @@ int component_add(struct device *dev, const struct component_ops *ops)
return ret < 0 ? ret : 0;
}
+
+/**
+ * component_add_typed - register a component
+ * @dev: component device
+ * @ops: component callbacks
+ * @subcomponent: nonzero identifier for subcomponents
+ *
+ * Register a new component for @dev. Functions in @ops will be called when the
+ * aggregate driver is ready to bind the overall driver by calling
+ * component_bind_all(). See also &struct component_ops.
+ *
+ * @subcomponent must be nonzero and is used to differentiate between multiple
+ * components registered on the same device @dev. These components are matched
+ * using component_match_add_typed().
+ *
+ * The component needs to be unregistered at driver unload/disconnect by
+ * calling component_del().
+ *
+ * See also component_add().
+ */
+int component_add_typed(struct device *dev, const struct component_ops *ops,
+ int subcomponent)
+{
+ if (WARN_ON(subcomponent == 0))
+ return -EINVAL;
+
+ return __component_add(dev, ops, subcomponent);
+}
+EXPORT_SYMBOL_GPL(component_add_typed);
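
A short sketch of how the new *_typed variants introduced above might be used together; the subcomponent identifiers and helper functions are hypothetical, only component_add_typed() and component_match_add_typed() come from this patch:

#include <linux/component.h>
#include <linux/device.h>

/* Hypothetical IDs telling two components on the same device apart. */
#define MY_SUBCOMPONENT_TX	1
#define MY_SUBCOMPONENT_RX	2

/* Provider side: register two typed components on one struct device. */
static int my_register_components(struct device *dev,
				  const struct component_ops *tx_ops,
				  const struct component_ops *rx_ops)
{
	int ret;

	ret = component_add_typed(dev, tx_ops, MY_SUBCOMPONENT_TX);
	if (ret)
		return ret;
	return component_add_typed(dev, rx_ops, MY_SUBCOMPONENT_RX);
}

/* Aggregate side: match only the TX component of the given device. */
static int my_compare_typed(struct device *dev, int subcomponent, void *data)
{
	return dev == data && subcomponent == MY_SUBCOMPONENT_TX;
}

static void my_add_tx_match(struct device *master,
			    struct component_match **match,
			    struct device *provider)
{
	component_match_add_typed(master, match, my_compare_typed, provider);
}
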
+
+/**
+ * component_add - register a component
+ * @dev: component device
+ * @ops: component callbacks
+ *
+ * Register a new component for @dev. Functions in @ops will be called when the
+ * aggregate driver is ready to bind the overall driver by calling
+ * component_bind_all(). See also &struct component_ops.
+ *
+ * The component needs to be unregistered at driver unload/disconnect by
+ * calling component_del().
+ *
+ * See also component_add_typed() for a variant that allows multiple different
+ * components on the same device.
+ */
+int component_add(struct device *dev, const struct component_ops *ops)
+{
+ return __component_add(dev, ops, 0);
+}
EXPORT_SYMBOL_GPL(component_add);
+/**
+ * component_del - unregister a component
+ * @dev: component device
+ * @ops: component callbacks
+ *
+ * Unregister a component added with component_add(). If the component is bound
+ * into an aggregate driver, this will force the entire aggregate driver, including
+ * all its components, to be unbound.
+ */
void component_del(struct device *dev, const struct component_ops *ops)
{
struct component *c, *component = NULL;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2e2ffe7010aa..72866a004f07 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -244,26 +244,23 @@ source "drivers/char/hw_random/Kconfig"
config NVRAM
tristate "/dev/nvram support"
- depends on ATARI || X86 || GENERIC_NVRAM
+ depends on X86 || HAVE_ARCH_NVRAM_OPS
+ default M68K || PPC
---help---
If you say Y here and create a character special file /dev/nvram
with major number 10 and minor number 144 using mknod ("man mknod"),
- you get read and write access to the extra bytes of non-volatile
- memory in the real time clock (RTC), which is contained in every PC
- and most Ataris. The actual number of bytes varies, depending on the
- nvram in the system, but is usually 114 (128-14 for the RTC).
-
- This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
- on Ataris. /dev/nvram may be used to view settings there, or to
- change them (with some utility). It could also be used to frequently
+ you get read and write access to the non-volatile memory.
+
+ /dev/nvram may be used to view settings in NVRAM or to change them
+ (with some utility). It could also be used to frequently
save a few bits of very important data that may not be lost over
power-off and for which writing to disk is too insecure. Note
however that most NVRAM space in a PC belongs to the BIOS and you
should NEVER idly tamper with it. See Ralf Brown's interrupt list
for a guide to the use of CMOS bytes by your BIOS.
- On Atari machines, /dev/nvram is always configured and does not need
- to be selected.
+ This memory is conventionally called "NVRAM" on PowerPC machines,
+ "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
To compile this driver as a module, choose M here: the
module will be called nvram.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index b8d42b4e979b..fbea7dd12932 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -26,11 +26,7 @@ obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_HPET) += hpet.o
obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
-ifeq ($(CONFIG_GENERIC_NVRAM),y)
- obj-$(CONFIG_NVRAM) += generic_nvram.o
-else
- obj-$(CONFIG_NVRAM) += nvram.o
-endif
+obj-$(CONFIG_NVRAM) += nvram.o
obj-$(CONFIG_TOSHIBA) += toshiba.o
obj-$(CONFIG_DS1620) += ds1620.o
obj-$(CONFIG_HW_RANDOM) += hw_random/
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index c0a5b1f3a986..4ccc39e00ced 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/nospec.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
IndexCard = NumCard - 1;
- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+ if (IndexCard >= MAX_BOARD)
+ return -EINVAL;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (!apbs[IndexCard].RamIO)
return -EINVAL;
#ifdef DEBUG
@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned char IndexCard;
void __iomem *pmem;
int ret = 0;
+ static int warncount = 10;
volatile unsigned char byte_reset_it;
struct st_ram_io *adgl;
void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ac_mutex);
IndexCard = adgl->num_card-1;
- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
- static int warncount = 10;
- if (warncount) {
- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
- warncount--;
- }
- kfree(adgl);
- mutex_unlock(&ac_mutex);
- return -EINVAL;
- }
+ if (cmd != 6 && IndexCard >= MAX_BOARD)
+ goto err;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (cmd != 6 && !apbs[IndexCard].RamIO)
+ goto err;
switch (cmd) {
@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
kfree(adgl);
mutex_unlock(&ac_mutex);
return 0;
+
+err:
+ if (warncount) {
+ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+ (int)IndexCard + 1);
+ warncount--;
+ }
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return -EINVAL;
+
}
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index d9aab643997e..11781ebffbf7 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -255,35 +255,12 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
}
/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
- */
-
-static int efi_rtc_open(struct inode *inode, struct file *file)
-{
- /*
- * nothing special to do here
- * We do accept multiple open files at the same time as we
- * synchronize on the per call operation.
- */
- return 0;
-}
-
-static int efi_rtc_close(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-/*
* The various file operations we support.
*/
static const struct file_operations efi_rtc_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = efi_rtc_ioctl,
- .open = efi_rtc_open,
- .release = efi_rtc_close,
.llseek = no_llseek,
};
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
deleted file mode 100644
index ff5394f47587..000000000000
--- a/drivers/char/generic_nvram.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Generic /dev/nvram driver for architectures providing some
- * "generic" hooks, that is :
- *
- * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
- *
- * Note that an additional hook is supported for PowerMac only
- * for getting the nvram "partition" informations
- *
- */
-
-#define NVRAM_VERSION "1.1"
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/pagemap.h>
-#include <linux/uaccess.h>
-#include <asm/nvram.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#endif
-
-#define NVRAM_SIZE 8192
-
-static DEFINE_MUTEX(nvram_mutex);
-static ssize_t nvram_len;
-
-static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
-{
- return generic_file_llseek_size(file, offset, origin,
- MAX_LFS_FILESIZE, nvram_len);
-}
-
-static ssize_t read_nvram(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int i;
- char __user *p = buf;
-
- if (!access_ok(buf, count))
- return -EFAULT;
- if (*ppos >= nvram_len)
- return 0;
- for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
- if (__put_user(nvram_read_byte(i), p))
- return -EFAULT;
- *ppos = i;
- return p - buf;
-}
-
-static ssize_t write_nvram(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int i;
- const char __user *p = buf;
- char c;
-
- if (!access_ok(buf, count))
- return -EFAULT;
- if (*ppos >= nvram_len)
- return 0;
- for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
- if (__get_user(c, p))
- return -EFAULT;
- nvram_write_byte(c, i);
- }
- *ppos = i;
- return p - buf;
-}
-
-static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- switch(cmd) {
-#ifdef CONFIG_PPC_PMAC
- case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
- printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
- case IOC_NVRAM_GET_OFFSET: {
- int part, offset;
-
- if (!machine_is(powermac))
- return -EINVAL;
- if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
- return -EFAULT;
- if (part < pmac_nvram_OF || part > pmac_nvram_NR)
- return -EINVAL;
- offset = pmac_get_partition(part);
- if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
- return -EFAULT;
- break;
- }
-#endif /* CONFIG_PPC_PMAC */
- case IOC_NVRAM_SYNC:
- nvram_sync();
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&nvram_mutex);
- ret = nvram_ioctl(file, cmd, arg);
- mutex_unlock(&nvram_mutex);
-
- return ret;
-}
-
-const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = nvram_llseek,
- .read = read_nvram,
- .write = write_nvram,
- .unlocked_ioctl = nvram_unlocked_ioctl,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-int __init nvram_init(void)
-{
- int ret = 0;
-
- printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
- NVRAM_VERSION);
- ret = misc_register(&nvram_dev);
- if (ret != 0)
- goto out;
-
- nvram_len = nvram_get_size();
- if (nvram_len < 0)
- nvram_len = NVRAM_SIZE;
-
-out:
- return ret;
-}
-
-void __exit nvram_cleanup(void)
-{
- misc_deregister( &nvram_dev );
-}
-
-module_init(nvram_init);
-module_exit(nvram_cleanup);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4a22b4b41aef..d0ad85900b79 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
return 1;
}
-__setup("hpet_mmap", hpet_mmap_enable);
+__setup("hpet_mmap=", hpet_mmap_enable);
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
@@ -842,7 +842,6 @@ int hpet_alloc(struct hpet_data *hdp)
struct hpet_dev *devp;
u32 i, ntimer;
struct hpets *hpetp;
- size_t siz;
struct hpet __iomem *hpet;
static struct hpets *last;
unsigned long period;
@@ -860,10 +859,8 @@ int hpet_alloc(struct hpet_data *hdp)
return 0;
}
- siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
- sizeof(struct hpet_dev));
-
- hpetp = kzalloc(siz, GFP_KERNEL);
+ hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1),
+ GFP_KERNEL);
if (!hpetp)
return -ENOMEM;
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 5c8d780637bd..3406852f67ff 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -729,7 +729,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd,
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fallthrough for 64-bit */
+ /* fall through - for 64-bit */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
@@ -757,7 +757,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fallthrough for x32 mode */
+ /* fall through - for x32 mode */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 8c9216a0f62e..0a31b60bee7b 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -50,6 +50,7 @@ static LIST_HEAD(soft_list);
* file operations
*/
static const struct file_operations mbcs_ops = {
+ .owner = THIS_MODULE,
.open = mbcs_open,
.llseek = mbcs_sram_llseek,
.read = mbcs_sram_read,
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 25264d65e716..eff1e3f1b3a2 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -21,13 +21,6 @@
* ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid
* again; use with care!)
*
- * This file also provides some functions for other parts of the kernel that
- * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}.
- * Obviously this can be used only if this driver is always configured into
- * the kernel and is not a module. Since the functions are used by some Atari
- * drivers, this is the case on the Atari.
- *
- *
* 1.1 Cesar Barros: SMP locking fixes
* added changelog
* 1.2 Erik Gilling: Cobalt Networks support
@@ -39,64 +32,6 @@
#include <linux/module.h>
#include <linux/nvram.h>
-
-#define PC 1
-#define ATARI 2
-
-/* select machine configuration */
-#if defined(CONFIG_ATARI)
-# define MACH ATARI
-#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */
-# define MACH PC
-#else
-# error Cannot build nvram driver for this machine configuration.
-#endif
-
-#if MACH == PC
-
-/* RTC in a PC */
-#define CHECK_DRIVER_INIT() 1
-
-/* On PCs, the checksum is built only over bytes 2..31 */
-#define PC_CKS_RANGE_START 2
-#define PC_CKS_RANGE_END 31
-#define PC_CKS_LOC 32
-#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE)
-
-#define mach_check_checksum pc_check_checksum
-#define mach_set_checksum pc_set_checksum
-#define mach_proc_infos pc_proc_infos
-
-#endif
-
-#if MACH == ATARI
-
-/* Special parameters for RTC in Atari machines */
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
-#define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))
-
-#define NVRAM_BYTES 50
-
-/* On Ataris, the checksum is over all bytes except the checksum bytes
- * themselves; these are at the very end */
-#define ATARI_CKS_RANGE_START 0
-#define ATARI_CKS_RANGE_END 47
-#define ATARI_CKS_LOC 48
-
-#define mach_check_checksum atari_check_checksum
-#define mach_set_checksum atari_set_checksum
-#define mach_proc_infos atari_proc_infos
-
-#endif
-
-/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
- * rtc_lock held. Due to the index-port/data-port design of the RTC, we
- * don't want two different things trying to get to it at once. (e.g. the
- * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
- */
-
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
@@ -106,28 +41,26 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#ifdef CONFIG_PPC
+#include <asm/nvram.h>
+#endif
static DEFINE_MUTEX(nvram_mutex);
static DEFINE_SPINLOCK(nvram_state_lock);
static int nvram_open_cnt; /* #times opened */
static int nvram_open_mode; /* special open modes */
+static ssize_t nvram_size;
#define NVRAM_WRITE 1 /* opened for writing (exclusive) */
#define NVRAM_EXCL 2 /* opened with O_EXCL */
-static int mach_check_checksum(void);
-static void mach_set_checksum(void);
-
-#ifdef CONFIG_PROC_FS
-static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
- void *offset);
-#endif
-
+#ifdef CONFIG_X86
/*
* These functions are provided to be called internally or by other parts of
* the kernel. It's up to the caller to ensure correct checksum before reading
@@ -139,13 +72,20 @@ static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
* know about the RTC cruft.
*/
-unsigned char __nvram_read_byte(int i)
+#define NVRAM_BYTES (128 - NVRAM_FIRST_BYTE)
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
+ */
+
+static unsigned char __nvram_read_byte(int i)
{
return CMOS_READ(NVRAM_FIRST_BYTE + i);
}
-EXPORT_SYMBOL(__nvram_read_byte);
-unsigned char nvram_read_byte(int i)
+static unsigned char pc_nvram_read_byte(int i)
{
unsigned long flags;
unsigned char c;
@@ -155,16 +95,14 @@ unsigned char nvram_read_byte(int i)
spin_unlock_irqrestore(&rtc_lock, flags);
return c;
}
-EXPORT_SYMBOL(nvram_read_byte);
/* This races nicely with trying to read with checksum checking (nvram_read) */
-void __nvram_write_byte(unsigned char c, int i)
+static void __nvram_write_byte(unsigned char c, int i)
{
CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
}
-EXPORT_SYMBOL(__nvram_write_byte);
-void nvram_write_byte(unsigned char c, int i)
+static void pc_nvram_write_byte(unsigned char c, int i)
{
unsigned long flags;
@@ -172,172 +110,266 @@ void nvram_write_byte(unsigned char c, int i)
__nvram_write_byte(c, i);
spin_unlock_irqrestore(&rtc_lock, flags);
}
-EXPORT_SYMBOL(nvram_write_byte);
-int __nvram_check_checksum(void)
+/* On PCs, the checksum is built only over bytes 2..31 */
+#define PC_CKS_RANGE_START 2
+#define PC_CKS_RANGE_END 31
+#define PC_CKS_LOC 32
+
+static int __nvram_check_checksum(void)
{
- return mach_check_checksum();
+ int i;
+ unsigned short sum = 0;
+ unsigned short expect;
+
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
+ __nvram_read_byte(PC_CKS_LOC+1);
+ return (sum & 0xffff) == expect;
}
-EXPORT_SYMBOL(__nvram_check_checksum);
-int nvram_check_checksum(void)
+static void __nvram_set_checksum(void)
{
- unsigned long flags;
- int rv;
+ int i;
+ unsigned short sum = 0;
- spin_lock_irqsave(&rtc_lock, flags);
- rv = __nvram_check_checksum();
- spin_unlock_irqrestore(&rtc_lock, flags);
- return rv;
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(sum >> 8, PC_CKS_LOC);
+ __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
}
-EXPORT_SYMBOL(nvram_check_checksum);
-static void __nvram_set_checksum(void)
+static long pc_nvram_set_checksum(void)
{
- mach_set_checksum();
+ spin_lock_irq(&rtc_lock);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
}
-#if 0
-void nvram_set_checksum(void)
+static long pc_nvram_initialize(void)
{
- unsigned long flags;
+ ssize_t i;
- spin_lock_irqsave(&rtc_lock, flags);
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ __nvram_write_byte(0, i);
__nvram_set_checksum();
- spin_unlock_irqrestore(&rtc_lock, flags);
+ spin_unlock_irq(&rtc_lock);
+ return 0;
}
-#endif /* 0 */
-
-/*
- * The are the file operation function for user access to /dev/nvram
- */
-static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+static ssize_t pc_nvram_get_size(void)
{
- return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
- NVRAM_BYTES);
+ return NVRAM_BYTES;
}
-static ssize_t nvram_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos)
{
- unsigned char contents[NVRAM_BYTES];
- unsigned i = *ppos;
- unsigned char *tmp;
+ char *p = buf;
+ loff_t i;
spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ *p = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
- if (!__nvram_check_checksum())
- goto checksum_err;
+ *ppos = i;
+ return p - buf;
+}
- for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
- *tmp = __nvram_read_byte(i);
+static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ __nvram_write_byte(*p, i);
+ __nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
- if (copy_to_user(buf, contents, tmp - contents))
- return -EFAULT;
-
*ppos = i;
+ return p - buf;
+}
- return tmp - contents;
+const struct nvram_ops arch_nvram_ops = {
+ .read = pc_nvram_read,
+ .write = pc_nvram_write,
+ .read_byte = pc_nvram_read_byte,
+ .write_byte = pc_nvram_write_byte,
+ .get_size = pc_nvram_get_size,
+ .set_checksum = pc_nvram_set_checksum,
+ .initialize = pc_nvram_initialize,
+};
+EXPORT_SYMBOL(arch_nvram_ops);
+#endif /* CONFIG_X86 */
-checksum_err:
- spin_unlock_irq(&rtc_lock);
- return -EIO;
-}
+/*
+ * These are the file operations for user access to /dev/nvram
+ */
-static ssize_t nvram_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin)
{
- unsigned char contents[NVRAM_BYTES];
- unsigned i = *ppos;
- unsigned char *tmp;
+ return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+ nvram_size);
+}
- if (i >= NVRAM_BYTES)
- return 0; /* Past EOF */
+static ssize_t nvram_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
- if (count > NVRAM_BYTES - i)
- count = NVRAM_BYTES - i;
- if (count > NVRAM_BYTES)
- return -EFAULT; /* Can't happen, but prove it to gcc */
- if (copy_from_user(contents, buf, count))
+ if (!access_ok(buf, count))
return -EFAULT;
+ if (*ppos >= nvram_size)
+ return 0;
- spin_lock_irq(&rtc_lock);
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
- if (!__nvram_check_checksum())
- goto checksum_err;
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
- for (tmp = contents; count--; ++i, ++tmp)
- __nvram_write_byte(*tmp, i);
+ ret = nvram_read(tmp, count, ppos);
+ if (ret <= 0)
+ goto out;
- __nvram_set_checksum();
+ if (copy_to_user(buf, tmp, ret)) {
+ *ppos -= ret;
+ ret = -EFAULT;
+ }
- spin_unlock_irq(&rtc_lock);
+out:
+ kfree(tmp);
+ return ret;
+}
- *ppos = i;
+static ssize_t nvram_misc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
- return tmp - contents;
+ if (!access_ok(buf, count))
+ return -EFAULT;
+ if (*ppos >= nvram_size)
+ return 0;
-checksum_err:
- spin_unlock_irq(&rtc_lock);
- return -EIO;
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
+
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ ret = nvram_write(tmp, count, ppos);
+ kfree(tmp);
+ return ret;
}
-static long nvram_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long nvram_misc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- int i;
+ long ret = -ENOTTY;
switch (cmd) {
-
+#ifdef CONFIG_PPC
+ case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
+ pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
+ /* fall through */
+ case IOC_NVRAM_GET_OFFSET:
+ ret = -EINVAL;
+#ifdef CONFIG_PPC_PMAC
+ if (machine_is(powermac)) {
+ int part, offset;
+
+ if (copy_from_user(&part, (void __user *)arg,
+ sizeof(part)) != 0)
+ return -EFAULT;
+ if (part < pmac_nvram_OF || part > pmac_nvram_NR)
+ return -EINVAL;
+ offset = pmac_get_partition(part);
+ if (offset < 0)
+ return -EINVAL;
+ if (copy_to_user((void __user *)arg,
+ &offset, sizeof(offset)) != 0)
+ return -EFAULT;
+ ret = 0;
+ }
+#endif
+ break;
+#ifdef CONFIG_PPC32
+ case IOC_NVRAM_SYNC:
+ if (ppc_md.nvram_sync != NULL) {
+ mutex_lock(&nvram_mutex);
+ ppc_md.nvram_sync();
+ mutex_unlock(&nvram_mutex);
+ }
+ ret = 0;
+ break;
+#endif
+#elif defined(CONFIG_X86) || defined(CONFIG_M68K)
case NVRAM_INIT:
/* initialize NVRAM contents and checksum */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&nvram_mutex);
- spin_lock_irq(&rtc_lock);
-
- for (i = 0; i < NVRAM_BYTES; ++i)
- __nvram_write_byte(0, i);
- __nvram_set_checksum();
-
- spin_unlock_irq(&rtc_lock);
- mutex_unlock(&nvram_mutex);
- return 0;
-
+ if (arch_nvram_ops.initialize != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.initialize();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
case NVRAM_SETCKS:
/* just set checksum, contents unchanged (maybe useful after
* checksum garbaged somehow...) */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&nvram_mutex);
- spin_lock_irq(&rtc_lock);
- __nvram_set_checksum();
- spin_unlock_irq(&rtc_lock);
- mutex_unlock(&nvram_mutex);
- return 0;
-
- default:
- return -ENOTTY;
+ if (arch_nvram_ops.set_checksum != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.set_checksum();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
+#endif /* CONFIG_X86 || CONFIG_M68K */
}
+ return ret;
}
-static int nvram_open(struct inode *inode, struct file *file)
+static int nvram_misc_open(struct inode *inode, struct file *file)
{
spin_lock(&nvram_state_lock);
+ /* Prevent multiple readers/writers if desired. */
if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
- (nvram_open_mode & NVRAM_EXCL) ||
- ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
+ (nvram_open_mode & NVRAM_EXCL)) {
spin_unlock(&nvram_state_lock);
return -EBUSY;
}
+#if defined(CONFIG_X86) || defined(CONFIG_M68K)
+ /* Prevent multiple writers if the set_checksum ioctl is implemented. */
+ if ((arch_nvram_ops.set_checksum != NULL) &&
+ (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) {
+ spin_unlock(&nvram_state_lock);
+ return -EBUSY;
+ }
+#endif
+
if (file->f_flags & O_EXCL)
nvram_open_mode |= NVRAM_EXCL;
if (file->f_mode & FMODE_WRITE)
@@ -349,7 +381,7 @@ static int nvram_open(struct inode *inode, struct file *file)
return 0;
}
-static int nvram_release(struct inode *inode, struct file *file)
+static int nvram_misc_release(struct inode *inode, struct file *file)
{
spin_lock(&nvram_state_lock);
@@ -366,123 +398,7 @@ static int nvram_release(struct inode *inode, struct file *file)
return 0;
}
-#ifndef CONFIG_PROC_FS
-static int nvram_add_proc_fs(void)
-{
- return 0;
-}
-
-#else
-
-static int nvram_proc_read(struct seq_file *seq, void *offset)
-{
- unsigned char contents[NVRAM_BYTES];
- int i = 0;
-
- spin_lock_irq(&rtc_lock);
- for (i = 0; i < NVRAM_BYTES; ++i)
- contents[i] = __nvram_read_byte(i);
- spin_unlock_irq(&rtc_lock);
-
- mach_proc_infos(contents, seq, offset);
-
- return 0;
-}
-
-static int nvram_add_proc_fs(void)
-{
- if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read))
- return -ENOMEM;
- return 0;
-}
-
-#endif /* CONFIG_PROC_FS */
-
-static const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = nvram_llseek,
- .read = nvram_read,
- .write = nvram_write,
- .unlocked_ioctl = nvram_ioctl,
- .open = nvram_open,
- .release = nvram_release,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-static int __init nvram_init(void)
-{
- int ret;
-
- /* First test whether the driver should init at all */
- if (!CHECK_DRIVER_INIT())
- return -ENODEV;
-
- ret = misc_register(&nvram_dev);
- if (ret) {
- printk(KERN_ERR "nvram: can't misc_register on minor=%d\n",
- NVRAM_MINOR);
- goto out;
- }
- ret = nvram_add_proc_fs();
- if (ret) {
- printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
- goto outmisc;
- }
- ret = 0;
- printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n");
-out:
- return ret;
-outmisc:
- misc_deregister(&nvram_dev);
- goto out;
-}
-
-static void __exit nvram_cleanup_module(void)
-{
- remove_proc_entry("driver/nvram", NULL);
- misc_deregister(&nvram_dev);
-}
-
-module_init(nvram_init);
-module_exit(nvram_cleanup_module);
-
-/*
- * Machine specific functions
- */
-
-#if MACH == PC
-
-static int pc_check_checksum(void)
-{
- int i;
- unsigned short sum = 0;
- unsigned short expect;
-
- for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
- __nvram_read_byte(PC_CKS_LOC+1);
- return (sum & 0xffff) == expect;
-}
-
-static void pc_set_checksum(void)
-{
- int i;
- unsigned short sum = 0;
-
- for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- __nvram_write_byte(sum >> 8, PC_CKS_LOC);
- __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
-}
-
-#ifdef CONFIG_PROC_FS
-
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
static const char * const floppy_types[] = {
"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
"3.5'' 2.88M", "3.5'' 2.88M"
@@ -495,8 +411,8 @@ static const char * const gfx_types[] = {
"monochrome",
};
-static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
- void *offset)
+static void pc_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
{
int checksum;
int type;
@@ -557,143 +473,76 @@ static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
return;
}
-#endif
-#endif /* MACH == PC */
-
-#if MACH == ATARI
-
-static int atari_check_checksum(void)
+static int nvram_proc_read(struct seq_file *seq, void *offset)
{
- int i;
- unsigned char sum = 0;
+ unsigned char contents[NVRAM_BYTES];
+ int i = 0;
- for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
- (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
-}
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ contents[i] = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
-static void atari_set_checksum(void)
-{
- int i;
- unsigned char sum = 0;
+ pc_nvram_proc_read(contents, seq, offset);
- for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- __nvram_write_byte(~sum, ATARI_CKS_LOC);
- __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
+ return 0;
}
+#endif /* CONFIG_X86 && CONFIG_PROC_FS */
-#ifdef CONFIG_PROC_FS
-
-static struct {
- unsigned char val;
- const char *name;
-} boot_prefs[] = {
- { 0x80, "TOS" },
- { 0x40, "ASV" },
- { 0x20, "NetBSD (?)" },
- { 0x10, "Linux" },
- { 0x00, "unspecified" }
-};
-
-static const char * const languages[] = {
- "English (US)",
- "German",
- "French",
- "English (UK)",
- "Spanish",
- "Italian",
- "6 (undefined)",
- "Swiss (French)",
- "Swiss (German)"
-};
-
-static const char * const dateformat[] = {
- "MM%cDD%cYY",
- "DD%cMM%cYY",
- "YY%cMM%cDD",
- "YY%cDD%cMM",
- "4 (undefined)",
- "5 (undefined)",
- "6 (undefined)",
- "7 (undefined)"
+static const struct file_operations nvram_misc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = nvram_misc_llseek,
+ .read = nvram_misc_read,
+ .write = nvram_misc_write,
+ .unlocked_ioctl = nvram_misc_ioctl,
+ .open = nvram_misc_open,
+ .release = nvram_misc_release,
};
-static const char * const colors[] = {
- "2", "4", "16", "256", "65536", "??", "??", "??"
+static struct miscdevice nvram_misc = {
+ NVRAM_MINOR,
+ "nvram",
+ &nvram_misc_fops,
};
-static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
- void *offset)
+static int __init nvram_module_init(void)
{
- int checksum = nvram_check_checksum();
- int i;
- unsigned vmode;
+ int ret;
- seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not ");
+ nvram_size = nvram_get_size();
+ if (nvram_size < 0)
+ return nvram_size;
- seq_printf(seq, "Boot preference : ");
- for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) {
- if (nvram[1] == boot_prefs[i].val) {
- seq_printf(seq, "%s\n", boot_prefs[i].name);
- break;
- }
+ ret = misc_register(&nvram_misc);
+ if (ret) {
+ pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR);
+ return ret;
}
- if (i < 0)
- seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
-
- seq_printf(seq, "SCSI arbitration : %s\n",
- (nvram[16] & 0x80) ? "on" : "off");
- seq_printf(seq, "SCSI host ID : ");
- if (nvram[16] & 0x80)
- seq_printf(seq, "%d\n", nvram[16] & 7);
- else
- seq_printf(seq, "n/a\n");
-
- /* the following entries are defined only for the Falcon */
- if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON)
- return;
- seq_printf(seq, "OS language : ");
- if (nvram[6] < ARRAY_SIZE(languages))
- seq_printf(seq, "%s\n", languages[nvram[6]]);
- else
- seq_printf(seq, "%u (undefined)\n", nvram[6]);
- seq_printf(seq, "Keyboard language: ");
- if (nvram[7] < ARRAY_SIZE(languages))
- seq_printf(seq, "%s\n", languages[nvram[7]]);
- else
- seq_printf(seq, "%u (undefined)\n", nvram[7]);
- seq_printf(seq, "Date format : ");
- seq_printf(seq, dateformat[nvram[8] & 7],
- nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
- seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
- seq_printf(seq, "Boot delay : ");
- if (nvram[10] == 0)
- seq_printf(seq, "default");
- else
- seq_printf(seq, "%ds%s\n", nvram[10],
- nvram[10] < 8 ? ", no memory test" : "");
-
- vmode = (nvram[14] << 8) | nvram[15];
- seq_printf(seq,
- "Video mode : %s colors, %d columns, %s %s monitor\n",
- colors[vmode & 7],
- vmode & 8 ? 80 : 40,
- vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
- seq_printf(seq, " %soverscan, compat. mode %s%s\n",
- vmode & 64 ? "" : "no ",
- vmode & 128 ? "on" : "off",
- vmode & 256 ?
- (vmode & 16 ? ", line doubling" : ", half screen") : "");
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
+ pr_err("nvram: can't create /proc/driver/nvram\n");
+ misc_deregister(&nvram_misc);
+ return -ENOMEM;
+ }
+#endif
- return;
+ pr_info("Non-volatile memory driver v" NVRAM_VERSION "\n");
+ return 0;
}
+
+static void __exit nvram_module_exit(void)
+{
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ remove_proc_entry("driver/nvram", NULL);
#endif
+ misc_deregister(&nvram_misc);
+}
-#endif /* MACH == ATARI */
+module_init(nvram_module_init);
+module_exit(nvram_module_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
+MODULE_ALIAS("devname:nvram");
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index de15bf55895b..8e17149655f0 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -114,6 +114,14 @@ config EXTCON_PALMAS
Say Y here to enable support for USB peripheral and USB host
detection by palmas usb.
+config EXTCON_PTN5150
+ tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
+ depends on I2C && GPIOLIB || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ Say Y here to enable support for USB peripheral and USB host
+ detection by the NXP PTN5150 CC (Configuration Channel) logic chip.
+
config EXTCON_QCOM_SPMI_MISC
tristate "Qualcomm USB extcon support"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0888fdeded72..261ce4cfe209 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
+obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
new file mode 100644
index 000000000000..d1c997599390
--- /dev/null
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-ptn5150.c - PTN5150 CC logic extcon driver to support USB detection
+//
+// Based on extcon-sm5502.c driver
+// Copyright (c) 2018-2019 by Vijai Kumar K
+// Author: Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/extcon-provider.h>
+#include <linux/gpio/consumer.h>
+
+/* PTN5150 registers */
+enum ptn5150_reg {
+ PTN5150_REG_DEVICE_ID = 0x01,
+ PTN5150_REG_CONTROL,
+ PTN5150_REG_INT_STATUS,
+ PTN5150_REG_CC_STATUS,
+ PTN5150_REG_CON_DET = 0x09,
+ PTN5150_REG_VCONN_STATUS,
+ PTN5150_REG_RESET,
+ PTN5150_REG_INT_MASK = 0x18,
+ PTN5150_REG_INT_REG_STATUS,
+ PTN5150_REG_END,
+};
+
+#define PTN5150_DFP_ATTACHED 0x1
+#define PTN5150_UFP_ATTACHED 0x2
+
+/* Define PTN5150 MASK/SHIFT constant */
+#define PTN5150_REG_DEVICE_ID_VENDOR_SHIFT 0
+#define PTN5150_REG_DEVICE_ID_VENDOR_MASK \
+ (0x3 << PTN5150_REG_DEVICE_ID_VENDOR_SHIFT)
+
+#define PTN5150_REG_DEVICE_ID_VERSION_SHIFT 3
+#define PTN5150_REG_DEVICE_ID_VERSION_MASK \
+ (0x1f << PTN5150_REG_DEVICE_ID_VERSION_SHIFT)
+
+#define PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT 2
+#define PTN5150_REG_CC_PORT_ATTACHMENT_MASK \
+ (0x7 << PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT)
+
+#define PTN5150_REG_CC_VBUS_DETECTION_SHIFT 7
+#define PTN5150_REG_CC_VBUS_DETECTION_MASK \
+ (0x1 << PTN5150_REG_CC_VBUS_DETECTION_SHIFT)
+
+#define PTN5150_REG_INT_CABLE_ATTACH_SHIFT 0
+#define PTN5150_REG_INT_CABLE_ATTACH_MASK \
+ (0x1 << PTN5150_REG_INT_CABLE_ATTACH_SHIFT)
+
+#define PTN5150_REG_INT_CABLE_DETACH_SHIFT 1
+#define PTN5150_REG_INT_CABLE_DETACH_MASK \
+ (0x1 << PTN5150_REG_INT_CABLE_DETACH_SHIFT)
+
+struct ptn5150_info {
+ struct device *dev;
+ struct extcon_dev *edev;
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ struct gpio_desc *int_gpiod;
+ struct gpio_desc *vbus_gpiod;
+ int irq;
+ struct work_struct irq_work;
+ struct mutex mutex;
+};
+
+/* List of detectable cables */
+static const unsigned int ptn5150_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static const struct regmap_config ptn5150_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PTN5150_REG_END,
+};
+
+static void ptn5150_irq_work(struct work_struct *work)
+{
+ struct ptn5150_info *info = container_of(work,
+ struct ptn5150_info, irq_work);
+ int ret = 0;
+ unsigned int reg_data;
+ unsigned int int_status;
+
+ if (!info->edev)
+ return;
+
+ mutex_lock(&info->mutex);
+
+ ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
+ if (ret) {
+ dev_err(info->dev, "failed to read CC STATUS %d\n", ret);
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ /* Clear interrupt. Read would clear the register */
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &int_status);
+ if (ret) {
+ dev_err(info->dev, "failed to read INT STATUS %d\n", ret);
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ if (int_status) {
+ unsigned int cable_attach;
+
+ cable_attach = int_status & PTN5150_REG_INT_CABLE_ATTACH_MASK;
+ if (cable_attach) {
+ unsigned int port_status;
+ unsigned int vbus;
+
+ port_status = ((reg_data &
+ PTN5150_REG_CC_PORT_ATTACHMENT_MASK) >>
+ PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT);
+
+ switch (port_status) {
+ case PTN5150_DFP_ATTACHED:
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB_HOST, false);
+ gpiod_set_value(info->vbus_gpiod, 0);
+ extcon_set_state_sync(info->edev, EXTCON_USB,
+ true);
+ break;
+ case PTN5150_UFP_ATTACHED:
+ extcon_set_state_sync(info->edev, EXTCON_USB,
+ false);
+ vbus = ((reg_data &
+ PTN5150_REG_CC_VBUS_DETECTION_MASK) >>
+ PTN5150_REG_CC_VBUS_DETECTION_SHIFT);
+ if (vbus)
+ gpiod_set_value(info->vbus_gpiod, 0);
+ else
+ gpiod_set_value(info->vbus_gpiod, 1);
+
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB_HOST, true);
+ break;
+ default:
+ dev_err(info->dev,
+ "Unknown Port status : %x\n",
+ port_status);
+ break;
+ }
+ } else {
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB_HOST, false);
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB, false);
+ gpiod_set_value(info->vbus_gpiod, 0);
+ }
+ }
+
+ /* Clear interrupt. Read would clear the register */
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS,
+ &int_status);
+ if (ret) {
+ dev_err(info->dev,
+ "failed to read INT REG STATUS %d\n", ret);
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ mutex_unlock(&info->mutex);
+}
+
+
+static irqreturn_t ptn5150_irq_handler(int irq, void *data)
+{
+ struct ptn5150_info *info = data;
+
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static int ptn5150_init_dev_type(struct ptn5150_info *info)
+{
+ unsigned int reg_data, vendor_id, version_id;
+ int ret;
+
+ ret = regmap_read(info->regmap, PTN5150_REG_DEVICE_ID, &reg_data);
+ if (ret) {
+ dev_err(info->dev, "failed to read DEVICE_ID %d\n", ret);
+ return -EINVAL;
+ }
+
+ vendor_id = ((reg_data & PTN5150_REG_DEVICE_ID_VENDOR_MASK) >>
+ PTN5150_REG_DEVICE_ID_VENDOR_SHIFT);
+ version_id = ((reg_data & PTN5150_REG_DEVICE_ID_VERSION_MASK) >>
+ PTN5150_REG_DEVICE_ID_VERSION_SHIFT);
+
+ dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
+ version_id, vendor_id);
+
+ /* Clear any existing interrupts */
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &reg_data);
+ if (ret) {
+ dev_err(info->dev,
+ "failed to read PTN5150_REG_INT_STATUS %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS, &reg_data);
+ if (ret) {
+ dev_err(info->dev,
+ "failed to read PTN5150_REG_INT_REG_STATUS %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ptn5150_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct device_node *np = i2c->dev.of_node;
+ struct ptn5150_info *info;
+ int ret;
+
+ if (!np)
+ return -EINVAL;
+
+ info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ i2c_set_clientdata(i2c, info);
+
+ info->dev = &i2c->dev;
+ info->i2c = i2c;
+ info->int_gpiod = devm_gpiod_get(&i2c->dev, "int", GPIOD_IN);
+ if (IS_ERR(info->int_gpiod)) {
+ dev_err(dev, "failed to get INT GPIO\n");
+ return PTR_ERR(info->int_gpiod);
+ }
+ info->vbus_gpiod = devm_gpiod_get(&i2c->dev, "vbus", GPIOD_IN);
+ if (IS_ERR(info->vbus_gpiod)) {
+ dev_err(dev, "failed to get VBUS GPIO\n");
+ return PTR_ERR(info->vbus_gpiod);
+ }
+ ret = gpiod_direction_output(info->vbus_gpiod, 0);
+ if (ret) {
+ dev_err(dev, "failed to set VBUS GPIO direction\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&info->mutex);
+
+ INIT_WORK(&info->irq_work, ptn5150_irq_work);
+
+ info->regmap = devm_regmap_init_i2c(i2c, &ptn5150_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ ret = PTR_ERR(info->regmap);
+ dev_err(info->dev, "failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (info->int_gpiod) {
+ info->irq = gpiod_to_irq(info->int_gpiod);
+ if (info->irq < 0) {
+ dev_err(dev, "failed to get INTB IRQ\n");
+ return info->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->irq, NULL,
+ ptn5150_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ i2c->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for INTB IRQ\n");
+ return ret;
+ }
+ }
+
+ /* Allocate extcon device */
+ info->edev = devm_extcon_dev_allocate(info->dev, ptn5150_extcon_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(info->dev, "failed to allocate memory for extcon\n");
+ return -ENOMEM;
+ }
+
+ /* Register extcon device */
+ ret = devm_extcon_dev_register(info->dev, info->edev);
+ if (ret) {
+ dev_err(info->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ /* Initialize PTN5150 device and print vendor id and version id */
+ ret = ptn5150_init_dev_type(info);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct of_device_id ptn5150_dt_match[] = {
+ { .compatible = "nxp,ptn5150" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
+
+static const struct i2c_device_id ptn5150_i2c_id[] = {
+ { "ptn5150", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
+
+static struct i2c_driver ptn5150_i2c_driver = {
+ .driver = {
+ .name = "ptn5150",
+ .of_match_table = ptn5150_dt_match,
+ },
+ .probe = ptn5150_i2c_probe,
+ .id_table = ptn5150_i2c_id,
+};
+
+static int __init ptn5150_i2c_init(void)
+{
+ return i2c_add_driver(&ptn5150_i2c_driver);
+}
+subsys_initcall(ptn5150_i2c_init);
+
+MODULE_DESCRIPTION("NXP PTN5150 CC logic Extcon driver");
+MODULE_AUTHOR("Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>");
+MODULE_LICENSE("GPL v2");
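Usage note: another driver (e.g. a USB controller) would typically consume the EXTCON_USB / EXTCON_USB_HOST states that extcon-ptn5150.c publishes via extcon_set_state_sync(). A minimal consumer sketch follows; the foo_* names and the "extcon" phandle wiring are hypothetical, while the extcon consumer calls themselves are the standard API.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Called whenever the bound extcon id (EXTCON_USB_HOST here) changes state. */
static int foo_host_cable_evt(struct notifier_block *nb, unsigned long state,
			      void *ptr)
{
	pr_info("USB host cable %s\n", state ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block foo_host_nb = {
	.notifier_call = foo_host_cable_evt,
};

static int foo_bind_ptn5150(struct device *dev)
{
	struct extcon_dev *edev;

	/* Looks up the first "extcon" phandle in the consumer's DT node. */
	edev = extcon_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	return devm_extcon_register_notifier(dev, edev, EXTCON_USB_HOST,
					     &foo_host_nb);
}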
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 0bb7b5cd6cdc..c20445b867ae 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -104,7 +104,7 @@ config SOCFPGA_FPGA_BRIDGE
config ALTERA_FREEZE_BRIDGE
tristate "Altera FPGA Freeze Bridge"
- depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ depends on FPGA_BRIDGE && HAS_IOMEM
help
Say Y to enable drivers for Altera FPGA Freeze bridges. A
freeze bridge is a bridge that exists in the FPGA fabric to
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 8c18beec6b57..678d0115f840 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -205,7 +205,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct altera_ps_conf *conf = mgr->priv;
- const char dummy[] = {0};
+ static const char dummy[] = {0};
int ret;
if (gpiod_get_value_cansleep(conf->status)) {
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
index 6abc88514512..6d8c8027e1cd 100644
--- a/drivers/gnss/Kconfig
+++ b/drivers/gnss/Kconfig
@@ -15,6 +15,19 @@ if GNSS
config GNSS_SERIAL
tristate
+config GNSS_MTK_SERIAL
+ tristate "Mediatek GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ select GNSS_SERIAL
+ help
+ Say Y here if you have a Mediatek-based GNSS receiver which uses a
+ serial interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-mtk.
+
+ If unsure, say N.
+
config GNSS_SIRF_SERIAL
tristate "SiRFstar GNSS receiver support"
depends on SERIAL_DEV_BUS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
index 5cf0ebe0330a..451f11401ecc 100644
--- a/drivers/gnss/Makefile
+++ b/drivers/gnss/Makefile
@@ -9,6 +9,9 @@ gnss-y := core.o
obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o
gnss-serial-y := serial.o
+obj-$(CONFIG_GNSS_MTK_SERIAL) += gnss-mtk.o
+gnss-mtk-y := mtk.o
+
obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o
gnss-sirf-y := sirf.o
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 4291a0dd22aa..320cfca80d5f 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -334,6 +334,7 @@ static const char * const gnss_type_names[GNSS_TYPE_COUNT] = {
[GNSS_TYPE_NMEA] = "NMEA",
[GNSS_TYPE_SIRF] = "SiRF",
[GNSS_TYPE_UBX] = "UBX",
+ [GNSS_TYPE_MTK] = "MTK",
};
static const char *gnss_type_name(struct gnss_device *gdev)
diff --git a/drivers/gnss/mtk.c b/drivers/gnss/mtk.c
new file mode 100644
index 000000000000..d1fc55560daf
--- /dev/null
+++ b/drivers/gnss/mtk.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mediatek GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+
+#include "serial.h"
+
+struct mtk_data {
+ struct regulator *vbackup;
+ struct regulator *vcc;
+};
+
+static int mtk_set_active(struct gnss_serial *gserial)
+{
+ struct mtk_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mtk_set_standby(struct gnss_serial *gserial)
+{
+ struct mtk_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_disable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mtk_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ switch (state) {
+ case GNSS_SERIAL_ACTIVE:
+ return mtk_set_active(gserial);
+ case GNSS_SERIAL_OFF:
+ case GNSS_SERIAL_STANDBY:
+ return mtk_set_standby(gserial);
+ }
+
+ return -EINVAL;
+}
+
+static const struct gnss_serial_ops mtk_gserial_ops = {
+ .set_power = mtk_set_power,
+};
+
+static int mtk_probe(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial;
+ struct mtk_data *data;
+ int ret;
+
+ gserial = gnss_serial_allocate(serdev, sizeof(*data));
+ if (IS_ERR(gserial)) {
+ ret = PTR_ERR(gserial);
+ return ret;
+ }
+
+ gserial->ops = &mtk_gserial_ops;
+
+ gserial->gdev->type = GNSS_TYPE_MTK;
+
+ data = gnss_serial_get_drvdata(gserial);
+
+ data->vcc = devm_regulator_get(&serdev->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_free_gserial;
+ }
+
+ data->vbackup = devm_regulator_get_optional(&serdev->dev, "vbackup");
+ if (IS_ERR(data->vbackup)) {
+ ret = PTR_ERR(data->vbackup);
+ if (ret == -ENODEV)
+ data->vbackup = NULL;
+ else
+ goto err_free_gserial;
+ }
+
+ if (data->vbackup) {
+ ret = regulator_enable(data->vbackup);
+ if (ret)
+ goto err_free_gserial;
+ }
+
+ ret = gnss_serial_register(gserial);
+ if (ret)
+ goto err_disable_vbackup;
+
+ return 0;
+
+err_disable_vbackup:
+ if (data->vbackup)
+ regulator_disable(data->vbackup);
+err_free_gserial:
+ gnss_serial_free(gserial);
+
+ return ret;
+}
+
+static void mtk_remove(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct mtk_data *data = gnss_serial_get_drvdata(gserial);
+
+ gnss_serial_deregister(gserial);
+ if (data->vbackup)
+ regulator_disable(data->vbackup);
+ gnss_serial_free(gserial);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_of_match[] = {
+ { .compatible = "globaltop,pa6h" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_of_match);
+#endif
+
+static struct serdev_device_driver mtk_driver = {
+ .driver = {
+ .name = "gnss-mtk",
+ .of_match_table = of_match_ptr(mtk_of_match),
+ .pm = &gnss_serial_pm_ops,
+ },
+ .probe = mtk_probe,
+ .remove = mtk_remove,
+};
+module_serdev_device_driver(mtk_driver);
+
+MODULE_AUTHOR("Loys Ollivier <lollivier@baylibre.com>");
+MODULE_DESCRIPTION("Mediatek GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 226f6e6fe01b..effed3a8d398 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -25,31 +25,83 @@
#define SIRF_ON_OFF_PULSE_TIME 100
#define SIRF_ACTIVATE_TIMEOUT 200
#define SIRF_HIBERNATE_TIMEOUT 200
+/*
+ * If no data arrives for this time, we assume that the chip is off.
+ * REVISIT: The report cycle is configurable and can be several minutes long,
+ * so this will only work reliably if the report cycle is set to a reasonably
+ * low value. Also, power-saving settings (like sending data only on movement)
+ * might make things work even worse.
+ * A workaround might be to parse shutdown or bootup messages.
+ */
+#define SIRF_REPORT_CYCLE 2000
struct sirf_data {
struct gnss_device *gdev;
struct serdev_device *serdev;
speed_t speed;
struct regulator *vcc;
+ struct regulator *lna;
struct gpio_desc *on_off;
struct gpio_desc *wakeup;
int irq;
bool active;
+
+ struct mutex gdev_mutex;
+ bool open;
+
+ struct mutex serdev_mutex;
+ int serdev_count;
+
wait_queue_head_t power_wait;
};
+static int sirf_serdev_open(struct sirf_data *data)
+{
+ int ret = 0;
+
+ mutex_lock(&data->serdev_mutex);
+ if (++data->serdev_count == 1) {
+ ret = serdev_device_open(data->serdev);
+ if (ret) {
+ data->serdev_count--;
+ goto out_unlock;
+ }
+
+ serdev_device_set_baudrate(data->serdev, data->speed);
+ serdev_device_set_flow_control(data->serdev, false);
+ }
+
+out_unlock:
+ mutex_unlock(&data->serdev_mutex);
+
+ return ret;
+}
+
+static void sirf_serdev_close(struct sirf_data *data)
+{
+ mutex_lock(&data->serdev_mutex);
+ if (--data->serdev_count == 0)
+ serdev_device_close(data->serdev);
+ mutex_unlock(&data->serdev_mutex);
+}
+
static int sirf_open(struct gnss_device *gdev)
{
struct sirf_data *data = gnss_get_drvdata(gdev);
struct serdev_device *serdev = data->serdev;
int ret;
- ret = serdev_device_open(serdev);
- if (ret)
- return ret;
+ mutex_lock(&data->gdev_mutex);
+ data->open = true;
+ mutex_unlock(&data->gdev_mutex);
- serdev_device_set_baudrate(serdev, data->speed);
- serdev_device_set_flow_control(serdev, false);
+ ret = sirf_serdev_open(data);
+ if (ret) {
+ mutex_lock(&data->gdev_mutex);
+ data->open = false;
+ mutex_unlock(&data->gdev_mutex);
+ return ret;
+ }
ret = pm_runtime_get_sync(&serdev->dev);
if (ret < 0) {
@@ -61,7 +113,11 @@ static int sirf_open(struct gnss_device *gdev)
return 0;
err_close:
- serdev_device_close(serdev);
+ sirf_serdev_close(data);
+
+ mutex_lock(&data->gdev_mutex);
+ data->open = false;
+ mutex_unlock(&data->gdev_mutex);
return ret;
}
@@ -71,9 +127,13 @@ static void sirf_close(struct gnss_device *gdev)
struct sirf_data *data = gnss_get_drvdata(gdev);
struct serdev_device *serdev = data->serdev;
- serdev_device_close(serdev);
+ sirf_serdev_close(data);
pm_runtime_put(&serdev->dev);
+
+ mutex_lock(&data->gdev_mutex);
+ data->open = false;
+ mutex_unlock(&data->gdev_mutex);
}
static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
@@ -105,8 +165,19 @@ static int sirf_receive_buf(struct serdev_device *serdev,
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
struct gnss_device *gdev = data->gdev;
+ int ret = 0;
+
+ if (!data->wakeup && !data->active) {
+ data->active = true;
+ wake_up_interruptible(&data->power_wait);
+ }
+
+ mutex_lock(&data->gdev_mutex);
+ if (data->open)
+ ret = gnss_insert_raw(gdev, buf, count);
+ mutex_unlock(&data->gdev_mutex);
- return gnss_insert_raw(gdev, buf, count);
+ return ret;
}
static const struct serdev_device_ops sirf_serdev_ops = {
@@ -125,17 +196,45 @@ static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id)
if (ret < 0)
goto out;
- data->active = !!ret;
+ data->active = ret;
wake_up_interruptible(&data->power_wait);
out:
return IRQ_HANDLED;
}
+static int sirf_wait_for_power_state_nowakeup(struct sirf_data *data,
+ bool active,
+ unsigned long timeout)
+{
+ int ret;
+
+ /* Wait for state change (including any shutdown messages). */
+ msleep(timeout);
+
+ /* Wait for data reception or timeout. */
+ data->active = false;
+ ret = wait_event_interruptible_timeout(data->power_wait,
+ data->active, msecs_to_jiffies(SIRF_REPORT_CYCLE));
+ if (ret < 0)
+ return ret;
+
+ if (ret > 0 && !active)
+ return -ETIMEDOUT;
+
+ if (ret == 0 && active)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int sirf_wait_for_power_state(struct sirf_data *data, bool active,
unsigned long timeout)
{
int ret;
+ if (!data->wakeup)
+ return sirf_wait_for_power_state_nowakeup(data, active, timeout);
+
ret = wait_event_interruptible_timeout(data->power_wait,
data->active == active, msecs_to_jiffies(timeout));
if (ret < 0)
@@ -168,21 +267,22 @@ static int sirf_set_active(struct sirf_data *data, bool active)
else
timeout = SIRF_HIBERNATE_TIMEOUT;
+ if (!data->wakeup) {
+ ret = sirf_serdev_open(data);
+ if (ret)
+ return ret;
+ }
+
do {
sirf_pulse_on_off(data);
ret = sirf_wait_for_power_state(data, active, timeout);
- if (ret < 0) {
- if (ret == -ETIMEDOUT)
- continue;
+ } while (ret == -ETIMEDOUT && retries--);
- return ret;
- }
+ if (!data->wakeup)
+ sirf_serdev_close(data);
- break;
- } while (retries--);
-
- if (retries < 0)
- return -ETIMEDOUT;
+ if (ret)
+ return ret;
return 0;
}
@@ -190,21 +290,60 @@ static int sirf_set_active(struct sirf_data *data, bool active)
static int sirf_runtime_suspend(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
+ int ret2;
+ int ret;
- if (!data->on_off)
- return regulator_disable(data->vcc);
+ if (data->on_off)
+ ret = sirf_set_active(data, false);
+ else
+ ret = regulator_disable(data->vcc);
+
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(data->lna);
+ if (ret)
+ goto err_reenable;
- return sirf_set_active(data, false);
+ return 0;
+
+err_reenable:
+ if (data->on_off)
+ ret2 = sirf_set_active(data, true);
+ else
+ ret2 = regulator_enable(data->vcc);
+
+ if (ret2)
+ dev_err(dev,
+ "failed to reenable power on failed suspend: %d\n",
+ ret2);
+
+ return ret;
}
static int sirf_runtime_resume(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
+ int ret;
- if (!data->on_off)
- return regulator_enable(data->vcc);
+ ret = regulator_enable(data->lna);
+ if (ret)
+ return ret;
+
+ if (data->on_off)
+ ret = sirf_set_active(data, true);
+ else
+ ret = regulator_enable(data->vcc);
+
+ if (ret)
+ goto err_disable_lna;
+
+ return 0;
+
+err_disable_lna:
+ regulator_disable(data->lna);
- return sirf_set_active(data, true);
+ return ret;
}
static int __maybe_unused sirf_suspend(struct device *dev)
@@ -275,6 +414,8 @@ static int sirf_probe(struct serdev_device *serdev)
data->serdev = serdev;
data->gdev = gdev;
+ mutex_init(&data->gdev_mutex);
+ mutex_init(&data->serdev_mutex);
init_waitqueue_head(&data->power_wait);
serdev_device_set_drvdata(serdev, data);
@@ -290,6 +431,12 @@ static int sirf_probe(struct serdev_device *serdev)
goto err_put_device;
}
+ data->lna = devm_regulator_get(dev, "lna");
+ if (IS_ERR(data->lna)) {
+ ret = PTR_ERR(data->lna);
+ goto err_put_device;
+ }
+
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
if (IS_ERR(data->on_off))
@@ -301,39 +448,53 @@ static int sirf_probe(struct serdev_device *serdev)
if (IS_ERR(data->wakeup))
goto err_put_device;
- /*
- * Configurations where WAKEUP has been left not connected,
- * are currently not supported.
- */
- if (!data->wakeup) {
- dev_err(dev, "no wakeup gpio specified\n");
- ret = -ENODEV;
+ ret = regulator_enable(data->vcc);
+ if (ret)
goto err_put_device;
- }
+
+ /* Wait for chip to boot into hibernate mode. */
+ msleep(SIRF_BOOT_DELAY);
}
if (data->wakeup) {
- ret = gpiod_to_irq(data->wakeup);
+ ret = gpiod_get_value_cansleep(data->wakeup);
if (ret < 0)
- goto err_put_device;
+ goto err_disable_vcc;
+ data->active = ret;
+ ret = gpiod_to_irq(data->wakeup);
+ if (ret < 0)
+ goto err_disable_vcc;
data->irq = ret;
- ret = devm_request_threaded_irq(dev, data->irq, NULL,
- sirf_wakeup_handler,
+ ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"wakeup", data);
if (ret)
- goto err_put_device;
+ goto err_disable_vcc;
}
if (data->on_off) {
- ret = regulator_enable(data->vcc);
- if (ret)
- goto err_put_device;
+ if (!data->wakeup) {
+ data->active = false;
- /* Wait for chip to boot into hibernate mode */
- msleep(SIRF_BOOT_DELAY);
+ ret = sirf_serdev_open(data);
+ if (ret)
+ goto err_disable_vcc;
+
+ msleep(SIRF_REPORT_CYCLE);
+ sirf_serdev_close(data);
+ }
+
+ /* Force hibernate mode if already active. */
+ if (data->active) {
+ ret = sirf_set_active(data, false);
+ if (ret) {
+ dev_err(dev, "failed to set hibernate mode: %d\n",
+ ret);
+ goto err_free_irq;
+ }
+ }
}
if (IS_ENABLED(CONFIG_PM)) {
@@ -342,7 +503,7 @@ static int sirf_probe(struct serdev_device *serdev)
} else {
ret = sirf_runtime_resume(dev);
if (ret < 0)
- goto err_disable_vcc;
+ goto err_free_irq;
}
ret = gnss_register_device(gdev);
@@ -356,6 +517,9 @@ err_disable_rpm:
pm_runtime_disable(dev);
else
sirf_runtime_suspend(dev);
+err_free_irq:
+ if (data->wakeup)
+ free_irq(data->irq, data);
err_disable_vcc:
if (data->on_off)
regulator_disable(data->vcc);
@@ -376,6 +540,9 @@ static void sirf_remove(struct serdev_device *serdev)
else
sirf_runtime_suspend(&serdev->dev);
+ if (data->wakeup)
+ free_irq(data->irq, data);
+
if (data->on_off)
regulator_disable(data->vcc);
@@ -386,6 +553,7 @@ static void sirf_remove(struct serdev_device *serdev)
static const struct of_device_id sirf_of_match[] = {
{ .compatible = "fastrax,uc430" },
{ .compatible = "linx,r4" },
+ { .compatible = "wi2wi,w2sg0004" },
{ .compatible = "wi2wi,w2sg0008i" },
{ .compatible = "wi2wi,w2sg0084i" },
{},
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ae55a6865d5c..b32681632f30 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -984,7 +984,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
+ ret = component_add_typed(dev_priv->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 4262452963b3..79203666fc62 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -26,6 +26,7 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
+#include <drm/i915_drm.h>
enum i915_gpio {
GPIOA,
@@ -150,21 +151,6 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
-
- I915_MAX_PORTS
-};
-
-#define port_name(p) ((p) + 'A')
-
/*
* Ports identifier referenced from other drivers.
* Expected to remain stable over time
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index cf549f1ed403..78c9e5a5e793 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,6 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index ce1b3cc4bf6d..d1662a75c7ec 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
@@ -84,6 +85,9 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
@@ -106,6 +110,12 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
gmu->freq = gmu->gpu_freqs[index];
+
+ /*
+ * Eventually we will want to scale the path vote with the frequency but
+ * for now leave it at max so that the performance is nominal.
+ */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
@@ -705,6 +715,8 @@ out:
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int status, ret;
@@ -720,6 +732,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
if (ret)
goto out;
+ /* Set the bus quota to a reasonable value for boot */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
+
a6xx_gmu_irq_enable(gmu);
/* Check to see if we are doing a cold or warm boot */
@@ -760,6 +775,8 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 val;
@@ -806,6 +823,9 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+ /* Remove the bus vote */
+ icc_set_bw(gpu->icc_path, 0, 0);
+
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2cfee1a4fe0b..27898475cdf4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -18,6 +18,7 @@
*/
#include <linux/ascii85.h>
+#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
@@ -747,6 +748,11 @@ static int adreno_get_pwrlevels(struct device *dev,
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+ /* Check for an interconnect path for the bus */
+ gpu->icc_path = of_icc_get(dev, NULL);
+ if (IS_ERR(gpu->icc_path))
+ gpu->icc_path = NULL;
+
return 0;
}
@@ -787,10 +793,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
+ struct msm_gpu *gpu = &adreno_gpu->base;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
+ icc_put(gpu->icc_path);
+
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ca17086f72c9..6241986bab51 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,6 +19,7 @@
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
@@ -118,6 +119,8 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
+ struct icc_path *icc_path;
+
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
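The comment added to __a6xx_gmu_set_freq() above leaves the interconnect vote pinned at 7216 MB/s and defers scaling it with the clock. Purely as a sketch of what that follow-up could look like; the linear policy and the nr_gpu_freqs field are assumptions, not part of this patch, and the helper name is hypothetical.

#include <linux/interconnect.h>
#include <linux/math64.h>

/*
 * Hypothetical helper (would sit next to __a6xx_gmu_set_freq() and rely on
 * the a6xx_gmu/msm_gpu definitions): vote bus bandwidth in proportion to the
 * selected core clock instead of always voting the maximum.
 */
static void a6xx_gmu_scale_bus_vote(struct msm_gpu *gpu, struct a6xx_gmu *gmu,
				    int index)
{
	unsigned long max_freq = gmu->gpu_freqs[gmu->nr_gpu_freqs - 1];
	u64 peak_mbps;

	/* 7216 MB/s is the ceiling the patch above uses at maximum frequency. */
	peak_mbps = div64_ul(7216ULL * gmu->gpu_freqs[index], max_freq);

	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(peak_mbps));
}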
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index bea4c9850247..23381c41d087 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -282,8 +282,8 @@ int vmbus_open(struct vmbus_channel *newchannel,
EXPORT_SYMBOL_GPL(vmbus_open);
/* Used for Hyper-V Socket: a guest client's connect() to the host */
-int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
- const uuid_le *shv_host_servie_id)
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
int ret;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index d01689079e9b..62703b354d6d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -141,7 +141,7 @@ static const struct vmbus_device vmbus_devs[] = {
};
static const struct {
- uuid_le guid;
+ guid_t guid;
} vmbus_unsupported_devs[] = {
{ HV_AVMA1_GUID },
{ HV_AVMA2_GUID },
@@ -171,26 +171,26 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
-static bool is_unsupported_vmbus_devs(const uuid_le *guid)
+static bool is_unsupported_vmbus_devs(const guid_t *guid)
{
int i;
for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
- if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
+ if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
return true;
return false;
}
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
- const uuid_le *guid = &channel->offermsg.offer.if_type;
+ const guid_t *guid = &channel->offermsg.offer.if_type;
u16 i;
if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
return HV_UNKNOWN;
for (i = HV_IDE; i < HV_UNKNOWN; i++) {
- if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
+ if (guid_equal(guid, &vmbus_devs[i].guid))
return i;
}
pr_info("Unknown GUID: %pUl\n", guid);
@@ -561,10 +561,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
atomic_dec(&vmbus_connection.offer_in_progress);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
- if (!uuid_le_cmp(channel->offermsg.offer.if_type,
- newchannel->offermsg.offer.if_type) &&
- !uuid_le_cmp(channel->offermsg.offer.if_instance,
- newchannel->offermsg.offer.if_instance)) {
+ if (guid_equal(&channel->offermsg.offer.if_type,
+ &newchannel->offermsg.offer.if_type) &&
+ guid_equal(&channel->offermsg.offer.if_instance,
+ &newchannel->offermsg.offer.if_instance)) {
fnew = false;
break;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a1f6ce6e5974..cb86b133eb4d 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -312,8 +312,8 @@ extern const struct vmbus_channel_message_table_entry
/* General vmbus interface */
-struct hv_device *vmbus_device_create(const uuid_le *type,
- const uuid_le *instance,
+struct hv_device *vmbus_device_create(const guid_t *type,
+ const guid_t *instance,
struct vmbus_channel *channel);
int vmbus_device_register(struct hv_device *child_device_obj);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 1f1a55e07733..9e8b31ccc142 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -74,8 +74,10 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
* This is the only case we need to signal when the
* ring transitions from being empty to non-empty.
*/
- if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
+ if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
+ ++channel->intr_out_empty;
vmbus_setevent(channel);
+ }
}
/* Get the next write location for the specified ring buffer. */
@@ -272,10 +274,19 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
* is empty since the read index == write index.
*/
if (bytes_avail_towrite <= totalbytes_towrite) {
+ ++channel->out_full_total;
+
+ if (!channel->out_full_flag) {
+ ++channel->out_full_first;
+ channel->out_full_flag = true;
+ }
+
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
+ channel->out_full_flag = false;
+
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
@@ -530,6 +541,7 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
if (curr_write_sz <= pending_sz)
return;
+ ++channel->intr_in_full;
vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 403fee01572c..000b53e5a17a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -234,7 +234,7 @@ static ssize_t server_monitor_pending_show(struct device *dev,
return -ENODEV;
return sprintf(buf, "%d\n",
channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
@@ -654,38 +654,28 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
return ret;
}
-static const uuid_le null_guid;
-
-static inline bool is_null_guid(const uuid_le *guid)
-{
- if (uuid_le_cmp(*guid, null_guid))
- return false;
- return true;
-}
-
static const struct hv_vmbus_device_id *
-hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid)
-
+hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
if (id == NULL)
return NULL; /* empty device table */
- for (; !is_null_guid(&id->guid); id++)
- if (!uuid_le_cmp(id->guid, *guid))
+ for (; !guid_is_null(&id->guid); id++)
+ if (guid_equal(&id->guid, guid))
return id;
return NULL;
}
static const struct hv_vmbus_device_id *
-hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
+hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
const struct hv_vmbus_device_id *id = NULL;
struct vmbus_dynid *dynid;
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
- if (!uuid_le_cmp(dynid->id.guid, *guid)) {
+ if (guid_equal(&dynid->id.guid, guid)) {
id = &dynid->id;
break;
}
@@ -695,9 +685,7 @@ hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
return id;
}
-static const struct hv_vmbus_device_id vmbus_device_null = {
- .guid = NULL_UUID_LE,
-};
+static const struct hv_vmbus_device_id vmbus_device_null;
/*
* Return a matching hv_vmbus_device_id pointer.
@@ -706,7 +694,7 @@ static const struct hv_vmbus_device_id vmbus_device_null = {
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
struct hv_device *dev)
{
- const uuid_le *guid = &dev->dev_type;
+ const guid_t *guid = &dev->dev_type;
const struct hv_vmbus_device_id *id;
/* When driver_override is set, only bind to the matching driver */
@@ -726,7 +714,7 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
-static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
+static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
struct vmbus_dynid *dynid;
@@ -764,10 +752,10 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
size_t count)
{
struct hv_driver *drv = drv_to_hv_drv(driver);
- uuid_le guid;
+ guid_t guid;
ssize_t retval;
- retval = uuid_le_to_bin(buf, &guid);
+ retval = guid_parse(buf, &guid);
if (retval)
return retval;
@@ -791,10 +779,10 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
{
struct hv_driver *drv = drv_to_hv_drv(driver);
struct vmbus_dynid *dynid, *n;
- uuid_le guid;
+ guid_t guid;
ssize_t retval;
- retval = uuid_le_to_bin(buf, &guid);
+ retval = guid_parse(buf, &guid);
if (retval)
return retval;
@@ -803,7 +791,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
struct hv_vmbus_device_id *id = &dynid->id;
- if (!uuid_le_cmp(id->guid, guid)) {
+ if (guid_equal(&id->guid, &guid)) {
list_del(&dynid->node);
kfree(dynid);
retval = count;
@@ -1496,6 +1484,38 @@ static ssize_t channel_events_show(const struct vmbus_channel *channel, char *bu
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->intr_in_full);
+}
+static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
+
+static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->intr_out_empty);
+}
+static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
+
+static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->out_full_first);
+}
+static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
+
+static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->out_full_total);
+}
+static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
+
static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
char *buf)
{
@@ -1521,6 +1541,10 @@ static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_latency.attr,
&chan_attr_interrupts.attr,
&chan_attr_events.attr,
+ &chan_attr_intr_in_full.attr,
+ &chan_attr_intr_out_empty.attr,
+ &chan_attr_out_full_first.attr,
+ &chan_attr_out_full_total.attr,
&chan_attr_monitor_id.attr,
&chan_attr_subchannel_id.attr,
NULL
@@ -1556,8 +1580,8 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
*/
-struct hv_device *vmbus_device_create(const uuid_le *type,
- const uuid_le *instance,
+struct hv_device *vmbus_device_create(const guid_t *type,
+ const guid_t *instance,
struct vmbus_channel *channel)
{
struct hv_device *child_device_obj;
@@ -1569,12 +1593,10 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
}
child_device_obj->channel = channel;
- memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
- memcpy(&child_device_obj->dev_instance, instance,
- sizeof(uuid_le));
+ guid_copy(&child_device_obj->dev_type, type);
+ guid_copy(&child_device_obj->dev_instance, instance);
child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
-
return child_device_obj;
}
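
The vmbus hunks above convert from the legacy uuid_le helpers to the generic guid_t API (guid_parse(), guid_is_null(), guid_equal(), guid_copy()). As a rough illustration of the semantics only, here is a minimal userspace sketch that mimics those helpers with plain 16-byte buffers; the struct name and layout are assumptions for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* illustrative stand-in for the kernel's 16-byte guid_t */
struct xguid { unsigned char b[16]; };

static bool xguid_equal(const struct xguid *a, const struct xguid *b)
{
	return memcmp(a->b, b->b, sizeof(a->b)) == 0;	/* byte-wise compare */
}

static bool xguid_is_null(const struct xguid *g)
{
	static const struct xguid null_guid;		/* all zeroes */
	return xguid_equal(g, &null_guid);
}

static void xguid_copy(struct xguid *dst, const struct xguid *src)
{
	memcpy(dst, src, sizeof(*dst));
}

int main(void)
{
	struct xguid a = { .b = { 0x14, 0x14 } }, b;

	xguid_copy(&b, &a);
	printf("equal=%d null=%d\n", xguid_equal(&a, &b), xguid_is_null(&a));
	return 0;
}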
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 45b2460f3166..e8819d750938 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
.id = 0x000bbd08,
.mask = 0x000fffff,
},
+ { /* Debug for Cortex-A73 */
+ .id = 0x000bbd09,
+ .mask = 0x000fffff,
+ },
{ 0, 0 },
};
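
The new debug_ids[] entry above matches the Cortex-A73 debug unit by its peripheral ID. AMBA ID tables are matched by masking the device's peripheral ID and comparing it against the table entry; a minimal sketch of that check (the table values are copied from the hunk, the helper name is made up):

#include <stdbool.h>
#include <stdint.h>

struct amba_id_entry { uint32_t id; uint32_t mask; };

/* true when a peripheral ID matches a table entry after masking */
static bool amba_id_match(const struct amba_id_entry *e, uint32_t periphid)
{
	return (periphid & e->mask) == e->id;
}

/* e.g. the Cortex-A73 entry from the hunk: id 0x000bbd09, mask 0x000fffff */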
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 53e2fb6e86f6..fe76b176974a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -55,7 +55,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
static bool etm4_arch_supported(u8 arch)
{
- switch (arch) {
+ /* Mask out the minor version number */
+ switch (arch & 0xf0) {
case ETM_ARCH_V4:
break;
default:
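
Masking the architecture field with 0xf0 keeps only the major version, so any ETMv4.x part (minor version in the low nibble) still matches. A small sketch of the check; the 0x40 encoding for ETM_ARCH_V4 is an assumption here:

#include <stdbool.h>
#include <stdint.h>

#define ETM_ARCH_V4 0x40	/* assumed: major version 4 in the high nibble */

static bool etm4_arch_supported(uint8_t arch)
{
	/* mask out the minor version so 0x41, 0x42, ... are still ETMv4 */
	return (arch & 0xf0) == ETM_ARCH_V4;
}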
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index ef339ff22090..f07825df5c7a 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -793,7 +793,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
struct stm_drvdata *drvdata;
struct resource *res = &adev->res;
struct resource ch_res;
- size_t res_size, bitmap_size;
+ size_t bitmap_size;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
@@ -833,15 +833,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->write_bytes = stm_fundamental_data_size(drvdata);
- if (boot_nr_channel) {
+ if (boot_nr_channel)
drvdata->numsp = boot_nr_channel;
- res_size = min((resource_size_t)(boot_nr_channel *
- BYTES_PER_CHANNEL), resource_size(res));
- } else {
+ else
drvdata->numsp = stm_num_stimulus_port(drvdata);
- res_size = min((resource_size_t)(drvdata->numsp *
- BYTES_PER_CHANNEL), resource_size(res));
- }
+
bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 89092f83567e..7045930fc958 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -80,8 +80,8 @@ static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
* Skip one-level up to the real device node, if we
* are using the new bindings.
*/
- if (!of_node_cmp(parent->name, "in-ports") ||
- !of_node_cmp(parent->name, "out-ports"))
+ if (of_node_name_eq(parent, "in-ports") ||
+ of_node_name_eq(parent, "out-ports"))
parent = of_get_next_parent(parent);
return parent;
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index fc6b7f8b62fb..7c1acc2f801c 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -422,6 +422,7 @@ static const struct intel_th_subdevice {
unsigned nres;
unsigned type;
unsigned otype;
+ bool mknode;
unsigned scrpd;
int id;
} intel_th_subdevices[] = {
@@ -456,6 +457,7 @@ static const struct intel_th_subdevice {
.name = "msc",
.id = 0,
.type = INTEL_TH_OUTPUT,
+ .mknode = true,
.otype = GTH_MSU,
.scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
},
@@ -476,6 +478,7 @@ static const struct intel_th_subdevice {
.name = "msc",
.id = 1,
.type = INTEL_TH_OUTPUT,
+ .mknode = true,
.otype = GTH_MSU,
.scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
},
@@ -635,7 +638,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
+ if (subdev->mknode)
+ thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
thdev->output.scratchpad = subdev->scrpd;
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 8426b7970c14..edc52d75e6bd 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -607,6 +607,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
+ int master;
if (thdev->host_mode)
return;
@@ -615,6 +616,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
+ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+ if (gth->master[master] == port)
+ gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
}
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 56694339cb06..0da6b787f553 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -272,19 +272,17 @@ static ssize_t lpp_dest_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct pti_device *pti = dev_get_drvdata(dev);
- ssize_t ret = -EINVAL;
int i;
- for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++)
- if (sysfs_streq(buf, lpp_dest_str[i]))
- break;
+ i = sysfs_match_string(lpp_dest_str, buf);
+ if (i < 0)
+ return i;
- if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) {
- pti->lpp_dest = i;
- ret = size;
- }
+ if (!(pti->lpp_dest_mask & BIT(i)))
+ return -EINVAL;
- return ret;
+ pti->lpp_dest = i;
+ return size;
}
static DEVICE_ATTR_RW(lpp_dest);
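
The lpp_dest_store() rewrite above relies on sysfs_match_string() returning either the index of the matching table entry or a negative errno, which removes the open-coded search loop. A minimal userspace sketch of the same lookup pattern (plain strcmp() instead of the sysfs-aware comparison, and hypothetical table contents):

#include <errno.h>
#include <stddef.h>
#include <string.h>

static const char * const lpp_dest_str[] = { "pti", "exi" };	/* hypothetical names */

/* return the index of buf in the table, or -EINVAL when there is no match */
static int match_string(const char * const *tbl, size_t n, const char *buf)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!strcmp(tbl[i], buf))
			return (int)i;
	return -EINVAL;
}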
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 4b7ae47789d2..3a1f4e650378 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -84,8 +84,12 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data,
/* Global packets (GERR, XSYNC, TRIG) are sent with register writes */
case STP_PACKET_GERR:
reg += 4;
+ /* fall through */
+
case STP_PACKET_XSYNC:
reg += 8;
+ /* fall through */
+
case STP_PACKET_TRIG:
if (flags & STP_PACKET_TIMESTAMPED)
reg += 4;
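
The /* fall through */ annotations above document that the register offset is cumulative: GERR adds 4 and then falls into XSYNC's 8, XSYNC adds 8, and TRIG adds 4 only when the packet is timestamped. A compact sketch of the same deliberate fall-through layout:

#include <stdint.h>

enum pkt { PKT_TRIG, PKT_XSYNC, PKT_GERR };

/* cumulative register offset selected via intentional switch fall-through */
static uint32_t pkt_reg(uint32_t base, enum pkt type, int timestamped)
{
	uint32_t reg = base;

	switch (type) {
	case PKT_GERR:
		reg += 4;
		/* fall through */
	case PKT_XSYNC:
		reg += 8;
		/* fall through */
	case PKT_TRIG:
		if (timestamped)
			reg += 4;
		break;
	}
	return reg;
}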
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 93ce3aa740a9..c7ba8acfd4d5 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -244,6 +244,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
;
if (i == width)
return pos;
+
+ /* step over [pos..pos+i) to continue search */
+ pos += i;
}
return -1;
@@ -732,7 +735,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
char *ids[] = { NULL, NULL };
- int ret = -EINVAL;
+ int ret = -EINVAL, wlimit = 1;
u32 size;
if (stmf->output.nr_chans)
@@ -760,8 +763,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (id->__reserved_0 || id->__reserved_1)
goto err_free;
- if (id->width < 1 ||
- id->width > PAGE_SIZE / stm->data->sw_mmiosz)
+ if (stm->data->sw_mmiosz)
+ wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
+
+ if (id->width < 1 || id->width > wlimit)
goto err_free;
ids[0] = id->id;
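
The hunk above only divides PAGE_SIZE by sw_mmiosz when it is non-zero and otherwise caps the width at 1, so devices without a software MMIO window no longer trigger a division by zero. A minimal sketch of that validation, with the page size assumed to be 4096 for illustration:

#include <errno.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u	/* assumed page size for the sketch */

/* validate a requested channel width; sw_mmiosz may legitimately be 0 */
static int check_width(uint32_t width, uint32_t sw_mmiosz)
{
	uint32_t wlimit = 1;

	if (sw_mmiosz)			/* avoid dividing by zero */
		wlimit = SKETCH_PAGE_SIZE / sw_mmiosz;

	if (width < 1 || width > wlimit)
		return -EINVAL;
	return 0;
}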
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
new file mode 100644
index 000000000000..07a8276fa35a
--- /dev/null
+++ b/drivers/interconnect/Kconfig
@@ -0,0 +1,15 @@
+menuconfig INTERCONNECT
+ tristate "On-Chip Interconnect management support"
+ help
+ Support for management of the on-chip interconnects.
+
+ This framework is designed to provide a generic interface for
+ managing the interconnects in a SoC.
+
+ If unsure, say no.
+
+if INTERCONNECT
+
+source "drivers/interconnect/qcom/Kconfig"
+
+endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
new file mode 100644
index 000000000000..28f2ab0824d5
--- /dev/null
+++ b/drivers/interconnect/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+icc-core-objs := core.o
+
+obj-$(CONFIG_INTERCONNECT) += icc-core.o
+obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
new file mode 100644
index 000000000000..6005a1c189f6
--- /dev/null
+++ b/drivers/interconnect/core.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework core driver
+ *
+ * Copyright (c) 2017-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+
+static DEFINE_IDR(icc_idr);
+static LIST_HEAD(icc_providers);
+static DEFINE_MUTEX(icc_lock);
+static struct dentry *icc_debugfs_dir;
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+ struct hlist_node req_node;
+ struct icc_node *node;
+ struct device *dev;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+ size_t num_nodes;
+ struct icc_req reqs[];
+};
+
+static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+{
+ if (!n)
+ return;
+
+ seq_printf(s, "%-30s %12u %12u\n",
+ n->name, n->avg_bw, n->peak_bw);
+}
+
+static int icc_summary_show(struct seq_file *s, void *data)
+{
+ struct icc_provider *provider;
+
+ seq_puts(s, " node avg peak\n");
+ seq_puts(s, "--------------------------------------------------------\n");
+
+ mutex_lock(&icc_lock);
+
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ struct icc_req *r;
+
+ icc_summary_show_one(s, n);
+ hlist_for_each_entry(r, &n->req_list, req_node) {
+ if (!r->dev)
+ continue;
+
+ seq_printf(s, " %-26s %12u %12u\n",
+ dev_name(r->dev), r->avg_bw,
+ r->peak_bw);
+ }
+ }
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+
+static int icc_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, icc_summary_show, inode->i_private);
+}
+
+static const struct file_operations icc_summary_fops = {
+ .open = icc_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct icc_node *node_find(const int id)
+{
+ return idr_find(&icc_idr, id);
+}
+
+static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
+ ssize_t num_nodes)
+{
+ struct icc_node *node = dst;
+ struct icc_path *path;
+ int i;
+
+ path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
+ if (!path)
+ return ERR_PTR(-ENOMEM);
+
+ path->num_nodes = num_nodes;
+
+ for (i = num_nodes - 1; i >= 0; i--) {
+ node->provider->users++;
+ hlist_add_head(&path->reqs[i].req_node, &node->req_list);
+ path->reqs[i].node = node;
+ path->reqs[i].dev = dev;
+ /* reference to previous node was saved during path traversal */
+ node = node->reverse;
+ }
+
+ return path;
+}
+
+static struct icc_path *path_find(struct device *dev, struct icc_node *src,
+ struct icc_node *dst)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *n, *node = NULL;
+ struct list_head traverse_list;
+ struct list_head edge_list;
+ struct list_head visited_list;
+ size_t i, depth = 1;
+ bool found = false;
+
+ INIT_LIST_HEAD(&traverse_list);
+ INIT_LIST_HEAD(&edge_list);
+ INIT_LIST_HEAD(&visited_list);
+
+ list_add(&src->search_list, &traverse_list);
+ src->reverse = NULL;
+
+ do {
+ list_for_each_entry_safe(node, n, &traverse_list, search_list) {
+ if (node == dst) {
+ found = true;
+ list_splice_init(&edge_list, &visited_list);
+ list_splice_init(&traverse_list, &visited_list);
+ break;
+ }
+ for (i = 0; i < node->num_links; i++) {
+ struct icc_node *tmp = node->links[i];
+
+ if (!tmp) {
+ path = ERR_PTR(-ENOENT);
+ goto out;
+ }
+
+ if (tmp->is_traversed)
+ continue;
+
+ tmp->is_traversed = true;
+ tmp->reverse = node;
+ list_add_tail(&tmp->search_list, &edge_list);
+ }
+ }
+
+ if (found)
+ break;
+
+ list_splice_init(&traverse_list, &visited_list);
+ list_splice_init(&edge_list, &traverse_list);
+
+ /* count the hops including the source */
+ depth++;
+
+ } while (!list_empty(&traverse_list));
+
+out:
+
+ /* reset the traversed state */
+ list_for_each_entry_reverse(n, &visited_list, search_list)
+ n->is_traversed = false;
+
+ if (found)
+ path = path_init(dev, dst, depth);
+
+ return path;
+}
+
+/*
+ * We want the path to honor all bandwidth requests, so the average and peak
+ * bandwidth requirements from each consumer are aggregated at each node.
+ * The aggregation is platform specific, so each platform can customize it by
+ * implementing its own aggregate() function.
+ */
+
+static int aggregate_requests(struct icc_node *node)
+{
+ struct icc_provider *p = node->provider;
+ struct icc_req *r;
+
+ node->avg_bw = 0;
+ node->peak_bw = 0;
+
+ hlist_for_each_entry(r, &node->req_list, req_node)
+ p->aggregate(node, r->avg_bw, r->peak_bw,
+ &node->avg_bw, &node->peak_bw);
+
+ return 0;
+}
+
+static int apply_constraints(struct icc_path *path)
+{
+ struct icc_node *next, *prev = NULL;
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ next = path->reqs[i].node;
+
+ /*
+ * Both endpoints should be valid master-slave pairs of the
+ * same interconnect provider that will be configured.
+ */
+ if (!prev || next->provider != prev->provider) {
+ prev = next;
+ continue;
+ }
+
+ /* set the constraints */
+ ret = next->provider->set(prev, next);
+ if (ret)
+ goto out;
+
+ prev = next;
+ }
+out:
+ return ret;
+}
+
+/**
+ * of_icc_xlate_onecell() - Translate function using a single index.
+ * @spec: OF phandle args to map into an interconnect node.
+ * @data: private data (pointer to struct icc_onecell_data)
+ *
+ * This is a generic translate function that can be used to model simple
+ * interconnect providers that have one device tree node and provide
+ * multiple interconnect nodes. A single cell is used as an index into
+ * an array of icc nodes specified in the icc_onecell_data struct when
+ * registering the provider.
+ */
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+ void *data)
+{
+ struct icc_onecell_data *icc_data = data;
+ unsigned int idx = spec->args[0];
+
+ if (idx >= icc_data->num_nodes) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return icc_data->nodes[idx];
+}
+EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
+
+/**
+ * of_icc_get_from_provider() - Look-up interconnect node
+ * @spec: OF phandle args to use for look-up
+ *
+ * Looks for an interconnect provider under the node specified by @spec and,
+ * if found, uses the provider's xlate function to map the phandle args to a
+ * node.
+ *
+ * Returns a valid pointer to struct icc_node on success or ERR_PTR()
+ * on failure.
+ */
+static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+ struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
+ struct icc_provider *provider;
+
+ if (!spec || spec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&icc_lock);
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ if (provider->dev->of_node == spec->np)
+ node = provider->xlate(spec, provider->data);
+ if (!IS_ERR(node))
+ break;
+ }
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *src_node, *dst_node;
+ struct device_node *np = NULL;
+ struct of_phandle_args src_args, dst_args;
+ int idx = 0;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ /*
+	 * When the consumer DT node does not have an "interconnects"
+	 * property, return a NULL path to skip setting constraints.
+ */
+ if (!of_find_property(np, "interconnects", NULL))
+ return NULL;
+
+ /*
+	 * We use a combination of phandle and specifier for each endpoint.
+	 * For now, support only global ids and extend this in the future if
+	 * needed, without breaking DT compatibility.
+ */
+ if (name) {
+ idx = of_property_match_string(np, "interconnect-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2,
+ &src_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(src_args.np);
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2 + 1,
+ &dst_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(dst_args.np);
+
+ src_node = of_icc_get_from_provider(&src_args);
+
+ if (IS_ERR(src_node)) {
+ if (PTR_ERR(src_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding src node: %ld\n",
+ PTR_ERR(src_node));
+ return ERR_CAST(src_node);
+ }
+
+ dst_node = of_icc_get_from_provider(&dst_args);
+
+ if (IS_ERR(dst_node)) {
+ if (PTR_ERR(dst_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding dst node: %ld\n",
+ PTR_ERR(dst_node));
+ return ERR_CAST(dst_node);
+ }
+
+ mutex_lock(&icc_lock);
+ path = path_find(dev, src_node, dst_node);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ mutex_unlock(&icc_lock);
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(of_icc_get);
+
+/**
+ * icc_set_bw() - set bandwidth constraints on an interconnect path
+ * @path: reference to the path returned by icc_get()
+ * @avg_bw: average bandwidth in kilobytes per second
+ * @peak_bw: peak bandwidth in kilobytes per second
+ *
+ * This function is used by an interconnect consumer to express its own needs
+ * in terms of bandwidth for a previously requested path between two endpoints.
+ * The requests are aggregated and each node is updated accordingly. The entire
+ * path is locked by a mutex to ensure that the set() is completed.
+ * The @path can be NULL when the "interconnects" DT property is missing,
+ * in which case no constraints will be set.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise.
+ */
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ struct icc_node *node;
+ u32 old_avg, old_peak;
+ size_t i;
+ int ret;
+
+ if (!path || !path->num_nodes)
+ return 0;
+
+ mutex_lock(&icc_lock);
+
+ old_avg = path->reqs[0].avg_bw;
+ old_peak = path->reqs[0].peak_bw;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+
+ /* update the consumer request for this path */
+ path->reqs[i].avg_bw = avg_bw;
+ path->reqs[i].peak_bw = peak_bw;
+
+ /* aggregate requests for this node */
+ aggregate_requests(node);
+ }
+
+ ret = apply_constraints(path);
+ if (ret) {
+ pr_debug("interconnect: error applying constraints (%d)\n",
+ ret);
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ path->reqs[i].avg_bw = old_avg;
+ path->reqs[i].peak_bw = old_peak;
+ aggregate_requests(node);
+ }
+ apply_constraints(path);
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_set_bw);
+
+/**
+ * icc_get() - return a handle for path between two endpoints
+ * @dev: the device requesting the path
+ * @src_id: source device port id
+ * @dst_id: destination device port id
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release
+ * constraints when they are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
+ * interconnect API is disabled.
+ */
+struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
+{
+ struct icc_node *src, *dst;
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+
+ mutex_lock(&icc_lock);
+
+ src = node_find(src_id);
+ if (!src)
+ goto out;
+
+ dst = node_find(dst_id);
+ if (!dst)
+ goto out;
+
+ path = path_find(dev, src, dst);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+
+out:
+ mutex_unlock(&icc_lock);
+ return path;
+}
+EXPORT_SYMBOL_GPL(icc_get);
+
+/**
+ * icc_put() - release the reference to the icc_path
+ * @path: interconnect path
+ *
+ * Use this function to release the constraints on a path when the path is
+ * no longer needed. The constraints will be re-aggregated.
+ */
+void icc_put(struct icc_path *path)
+{
+ struct icc_node *node;
+ size_t i;
+ int ret;
+
+ if (!path || WARN_ON(IS_ERR(path)))
+ return;
+
+ ret = icc_set_bw(path, 0, 0);
+ if (ret)
+ pr_err("%s: error (%d)\n", __func__, ret);
+
+ mutex_lock(&icc_lock);
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ hlist_del(&path->reqs[i].req_node);
+ if (!WARN_ON(!node->provider->users))
+ node->provider->users--;
+ }
+ mutex_unlock(&icc_lock);
+
+ kfree(path);
+}
+EXPORT_SYMBOL_GPL(icc_put);
+
+static struct icc_node *icc_node_create_nolock(int id)
+{
+ struct icc_node *node;
+
+ /* check if node already exists */
+ node = node_find(id);
+ if (node)
+ return node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
+ if (id < 0) {
+ WARN(1, "%s: couldn't get idr\n", __func__);
+ kfree(node);
+ return ERR_PTR(id);
+ }
+
+ node->id = id;
+
+ return node;
+}
+
+/**
+ * icc_node_create() - create a node
+ * @id: node id
+ *
+ * Return: icc_node pointer on success, or ERR_PTR() on error
+ */
+struct icc_node *icc_node_create(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = icc_node_create_nolock(id);
+
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(icc_node_create);
+
+/**
+ * icc_node_destroy() - destroy a node
+ * @id: node id
+ */
+void icc_node_destroy(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = node_find(id);
+ if (node) {
+ idr_remove(&icc_idr, node->id);
+ WARN_ON(!hlist_empty(&node->req_list));
+ }
+
+ mutex_unlock(&icc_lock);
+
+ kfree(node);
+}
+EXPORT_SYMBOL_GPL(icc_node_destroy);
+
+/**
+ * icc_link_create() - create a link between two nodes
+ * @node: source node id
+ * @dst_id: destination node id
+ *
+ * Create a link between two nodes. The nodes might belong to different
+ * interconnect providers and the @dst_id node might not exist (if the
+ * provider driver has not probed yet). In that case, just create the
+ * @dst_id node now; once the actual provider driver is probed, the rest
+ * of the node data is filled in.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ struct icc_node *dst;
+ struct icc_node **new;
+ int ret = 0;
+
+ if (!node->provider)
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ dst = node_find(dst_id);
+ if (!dst) {
+ dst = icc_node_create_nolock(dst_id);
+
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out;
+ }
+ }
+
+ new = krealloc(node->links,
+ (node->num_links + 1) * sizeof(*node->links),
+ GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ node->links = new;
+ node->links[node->num_links++] = dst;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_create);
+
+/**
+ * icc_link_destroy() - destroy a link between two nodes
+ * @src: pointer to source node
+ * @dst: pointer to destination node
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+{
+ struct icc_node **new;
+ size_t slot;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(src))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(dst))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (slot = 0; slot < src->num_links; slot++)
+ if (src->links[slot] == dst)
+ break;
+
+ if (WARN_ON(slot == src->num_links)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ src->links[slot] = src->links[--src->num_links];
+
+ new = krealloc(src->links, src->num_links * sizeof(*src->links),
+ GFP_KERNEL);
+ if (new)
+ src->links = new;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_destroy);
+
+/**
+ * icc_node_add() - add interconnect node to interconnect provider
+ * @node: pointer to the interconnect node
+ * @provider: pointer to the interconnect provider
+ */
+void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+
+ node->provider = provider;
+ list_add_tail(&node->node_list, &provider->nodes);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_add);
+
+/**
+ * icc_node_del() - delete interconnect node from interconnect provider
+ * @node: pointer to the interconnect node
+ */
+void icc_node_del(struct icc_node *node)
+{
+ mutex_lock(&icc_lock);
+
+ list_del(&node->node_list);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_del);
+
+/**
+ * icc_provider_add() - add a new interconnect provider
+ * @provider: the interconnect provider that will be added into topology
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_provider_add(struct icc_provider *provider)
+{
+ if (WARN_ON(!provider->set))
+ return -EINVAL;
+ if (WARN_ON(!provider->xlate))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ INIT_LIST_HEAD(&provider->nodes);
+ list_add_tail(&provider->provider_list, &icc_providers);
+
+ mutex_unlock(&icc_lock);
+
+ dev_dbg(provider->dev, "interconnect provider added to topology\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_add);
+
+/**
+ * icc_provider_del() - delete previously added interconnect provider
+ * @provider: the interconnect provider that will be removed from topology
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_provider_del(struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+ if (provider->users) {
+ pr_warn("interconnect provider still has %d users\n",
+ provider->users);
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ if (!list_empty(&provider->nodes)) {
+ pr_warn("interconnect provider still has nodes\n");
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ list_del(&provider->provider_list);
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_del);
+
+static int __init icc_init(void)
+{
+ icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
+ debugfs_create_file("interconnect_summary", 0444,
+ icc_debugfs_dir, NULL, &icc_summary_fops);
+ return 0;
+}
+
+static void __exit icc_exit(void)
+{
+ debugfs_remove_recursive(icc_debugfs_dir);
+}
+module_init(icc_init);
+module_exit(icc_exit);
+
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Interconnect Driver Core");
+MODULE_LICENSE("GPL v2");
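
The kernel-doc in this file describes the consumer flow: obtain a path with of_icc_get() (or icc_get()), vote bandwidth with icc_set_bw(), and drop the constraints with icc_put(). A hedged sketch of how a consumer driver might use these calls; the "cpu-mem" path name and the surrounding driver are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

/* hypothetical consumer: vote bandwidth on a DT-described path */
static int example_request_bw(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "cpu-mem");	/* NULL when the API is disabled */
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = icc_set_bw(path, 100000, 200000);	/* avg/peak in kBps */
	if (ret)
		icc_put(path);

	return ret;
}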
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
new file mode 100644
index 000000000000..290d330abe5a
--- /dev/null
+++ b/drivers/interconnect/qcom/Kconfig
@@ -0,0 +1,13 @@
+config INTERCONNECT_QCOM
+ bool "Qualcomm Network-on-Chip interconnect drivers"
+ depends on ARCH_QCOM
+ help
+ Support for Qualcomm's Network-on-Chip interconnect hardware.
+
+config INTERCONNECT_QCOM_SDM845
+ tristate "Qualcomm SDM845 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdm845-based
+ platforms.
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
new file mode 100644
index 000000000000..1c1cea690f92
--- /dev/null
+++ b/drivers/interconnect/qcom/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+qnoc-sdm845-objs := sdm845.o
+
+obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
new file mode 100644
index 000000000000..4915b78da673
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <asm/div64.h>
+#include <dt-bindings/interconnect/qcom,sdm845.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#define BCM_TCS_CMD_COMMIT_SHFT 30
+#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
+#define BCM_TCS_CMD_VALID_SHFT 29
+#define BCM_TCS_CMD_VALID_MASK 0x20000000
+#define BCM_TCS_CMD_VOTE_X_SHFT 14
+#define BCM_TCS_CMD_VOTE_MASK 0x3fff
+#define BCM_TCS_CMD_VOTE_Y_SHFT 0
+#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
+
+#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
+ (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
+ ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
+ ((cpu_to_le32(vote_x) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
+ ((cpu_to_le32(vote_y) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
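
BCM_TCS_CMD() above packs the commit and valid flags plus two 14-bit votes into one 32-bit TCS data word. A little-endian userspace sketch of the same packing (the cpu_to_le32() step is dropped, which is a no-op on little-endian hosts):

#include <stdint.h>
#include <stdio.h>

#define COMMIT_SHFT 30
#define VALID_SHFT  29
#define VOTE_X_SHFT 14
#define VOTE_MASK   0x3fff

/* pack commit/valid flags and two 14-bit votes into one TCS data word */
static uint32_t tcs_data(int commit, int valid, uint32_t x, uint32_t y)
{
	return ((uint32_t)commit << COMMIT_SHFT) |
	       ((uint32_t)valid << VALID_SHFT) |
	       ((x & VOTE_MASK) << VOTE_X_SHFT) |
	       (y & VOTE_MASK);
}

int main(void)
{
	/* e.g. commit=1, valid=1, x=1, y=1 -> 0x60004001 */
	printf("0x%08x\n", tcs_data(1, 1, 1, 1));
	return 0;
}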
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct device *dev;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+#define SDM845_MAX_LINKS 43
+#define SDM845_MAX_BCMS 30
+#define SDM845_MAX_BCM_PER_NODE 2
+#define SDM845_MAX_VCD 10
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @channels: number of channels at this node
+ * @buswidth: width of the interconnect between a node and the bus
+ * @sum_avg: current sum aggregate value of all avg bw requests
+ * @max_peak: current max aggregate value of all peak bw requests
+ * @bcms: list of bcms associated with this logical node
+ * @num_bcms: number of entries in @bcms
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[SDM845_MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 channels;
+ u16 buswidth;
+ u64 sum_avg;
+ u64 max_peak;
+ struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
+ size_t num_bcms;
+};
+
+/**
+ * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
+ * known as Bus Clock Manager (BCM)
+ * @name: the bcm node name used to fetch BCM data from command db
+ * @type: latency or bandwidth bcm
+ * @addr: address offsets used when voting to RPMH
+ * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
+ * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+ * @dirty: flag used to indicate whether the bcm needs to be committed
+ * @keepalive: flag used to indicate whether a keepalive is required
+ * @aux_data: auxiliary data used when calculating threshold values and
+ * communicating with RPMh
+ * @list: used to link to other bcms when compiling lists for commit
+ * @num_nodes: total number of entries in @nodes
+ * @nodes: list of qcom_icc_nodes that this BCM encapsulates
+ */
+struct qcom_icc_bcm {
+ const char *name;
+ u32 type;
+ u32 addr;
+ u64 vote_x;
+ u64 vote_y;
+ bool dirty;
+ bool keepalive;
+ struct bcm_db aux_data;
+ struct list_head list;
+ size_t num_nodes;
+ struct qcom_icc_node *nodes[];
+};
+
+struct qcom_icc_fabric {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
+ _numlinks, ...) \
+ static struct qcom_icc_node _name = { \
+ .id = _id, \
+ .name = #_name, \
+ .channels = _channels, \
+ .buswidth = _buswidth, \
+ .num_links = _numlinks, \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
+DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
+DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
+DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
+DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
+DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
+DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
+DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
+DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
+DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
+DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
+DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
+DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
+DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
+DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
+DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
+DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
+DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
+DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
+DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
+DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
+DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
+DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
+DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
+DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
+DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
+DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
+DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
+DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);
+
+#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
+ static struct qcom_icc_bcm _name = { \
+ .name = _bcmname, \
+ .keepalive = _keepalive, \
+ .num_nodes = _numnodes, \
+ .nodes = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
+DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
+DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
+DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
+DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
+DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);
+
+static struct qcom_icc_node *rsc_hlos_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_l3,
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_LLCC] = &llcc_mc,
+ [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
+ [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_BLSP_1] = &qhm_qup1,
+ [MASTER_BLSP_2] = &qhm_qup2,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPDM] = &qhm_spdm,
+ [MASTER_TIC] = &qhm_tic,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GNOC_MEM_NOC] = &qnm_apps,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_MDP1] = &qxm_mdp1,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [MASTER_GIC] = &xm_gic,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [SLAVE_EBI1] = &ebi,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_BLSP_2] = &qhs_qupv3_north,
+ [SLAVE_BLSP_1] = &qhs_qupv3_south,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
+ [SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
+ [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
+ [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &qxs_pcie,
+ [SLAVE_PCIE_1] = &qxs_pcie_gen3,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_SERVICE_GNOC] = &srvc_gnoc,
+ [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_sh0,
+ &bcm_mm0,
+ &bcm_sh1,
+ &bcm_mm1,
+ &bcm_sh2,
+ &bcm_mm2,
+ &bcm_sh3,
+ &bcm_mm3,
+ &bcm_sh5,
+ &bcm_sn0,
+ &bcm_ce0,
+ &bcm_cn0,
+ &bcm_qup0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn11,
+ &bcm_sn12,
+ &bcm_sn14,
+ &bcm_sn15,
+};
+
+static struct qcom_icc_desc sdm845_rsc_hlos = {
+ .nodes = rsc_hlos_nodes,
+ .num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
+ .bcms = rsc_hlos_bcms,
+ .num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
+};
+
+static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
+{
+ struct qcom_icc_node *qn;
+ const struct bcm_db *data;
+ size_t data_count;
+ int i;
+
+ bcm->addr = cmd_db_read_addr(bcm->name);
+ if (!bcm->addr) {
+ dev_err(dev, "%s could not find RPMh address\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ data = cmd_db_read_aux_data(bcm->name, &data_count);
+ if (IS_ERR(data)) {
+ dev_err(dev, "%s command db read error (%ld)\n",
+ bcm->name, PTR_ERR(data));
+ return PTR_ERR(data);
+ }
+ if (!data_count) {
+ dev_err(dev, "%s command db missing or partial aux data\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm->aux_data.unit = le32_to_cpu(data->unit);
+ bcm->aux_data.width = le16_to_cpu(data->width);
+ bcm->aux_data.vcd = data->vcd;
+ bcm->aux_data.reserved = data->reserved;
+
+ /*
+ * Link Qnodes to their respective BCMs
+ */
+ for (i = 0; i < bcm->num_nodes; i++) {
+ qn = bcm->nodes[i];
+ qn->bcms[qn->num_bcms] = bcm;
+ qn->num_bcms++;
+ }
+
+ return 0;
+}
+
+inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
+ u32 addr, bool commit)
+{
+ bool valid = true;
+
+ if (!cmd)
+ return;
+
+ if (vote_x == 0 && vote_y == 0)
+ valid = false;
+
+ if (vote_x > BCM_TCS_CMD_VOTE_MASK)
+ vote_x = BCM_TCS_CMD_VOTE_MASK;
+
+ if (vote_y > BCM_TCS_CMD_VOTE_MASK)
+ vote_y = BCM_TCS_CMD_VOTE_MASK;
+
+ cmd->addr = addr;
+ cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
+
+ /*
+	 * Set the wait-for-completion flag on commands that need to complete
+	 * before the next command is sent.
+ */
+ if (commit)
+ cmd->wait = true;
+}
+
+static void tcs_list_gen(struct list_head *bcm_list,
+ struct tcs_cmd tcs_list[SDM845_MAX_VCD],
+ int n[SDM845_MAX_VCD])
+{
+ struct qcom_icc_bcm *bcm;
+ bool commit;
+ size_t idx = 0, batch = 0, cur_vcd_size = 0;
+
+ memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
+
+ list_for_each_entry(bcm, bcm_list, list) {
+ commit = false;
+ cur_vcd_size++;
+ if ((list_is_last(&bcm->list, bcm_list)) ||
+ bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
+ commit = true;
+ cur_vcd_size = 0;
+ }
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
+ bcm->addr, commit);
+ idx++;
+ n[batch]++;
+ /*
+ * Batch the BCMs in such a way that we do not split them in
+ * multiple payloads when they are under the same VCD. This is
+ * to ensure that every BCM is committed since we only set the
+ * commit bit on the last BCM request of every VCD.
+ */
+ if (n[batch] >= MAX_RPMH_PAYLOAD) {
+ if (!commit) {
+ n[batch] -= cur_vcd_size;
+ n[batch + 1] = cur_vcd_size;
+ }
+ batch++;
+ }
+ }
+}
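+
+/*
+ * Worked example for the batching above (numbers are hypothetical): with a
+ * MAX_RPMH_PAYLOAD of 16 and a commit list of 18 BCMs where the first 14
+ * each close their own VCD and the last four share one, the 16th command
+ * would land in the middle of that shared VCD. Since only the last BCM of
+ * a VCD carries the commit bit, the first batch is trimmed back to 14
+ * commands and the whole four-command VCD moves to the next batch, giving
+ * n[] = { 14, 4 } instead of { 16, 2 }.
+ */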
+
+static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+{
+ size_t i;
+ u64 agg_avg = 0;
+ u64 agg_peak = 0;
+ u64 temp;
+
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg = max(agg_avg, temp);
+
+ temp = bcm->nodes[i]->max_peak * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak = max(agg_peak, temp);
+ }
+
+ temp = agg_avg * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x = temp;
+
+ temp = agg_peak * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y = temp;
+
+ if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) {
+ bcm->vote_x = 1;
+ bcm->vote_y = 1;
+ }
+
+ bcm->dirty = false;
+}
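+
+/*
+ * Example of the aggregation above (hypothetical numbers): a node with
+ * buswidth 16 and 2 channels carrying an aggregated sum_avg of 6400000 on
+ * a BCM whose aux width is 4 contributes 6400000 * 4 / (16 * 2) = 800000
+ * to agg_avg; vote_x then becomes 800000 * 1000 / unit, with unit taken
+ * from the command db aux data.
+ */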
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ qn->sum_avg = *agg_avg;
+ qn->max_peak = *agg_peak;
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qn->bcms[i]->dirty = true;
+
+ return 0;
+}
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ struct tcs_cmd cmds[SDM845_MAX_BCMS];
+ struct list_head commit_list;
+ int commit_idx[SDM845_MAX_VCD];
+ int ret = 0, i;
+
+ if (!src)
+ node = dst;
+ else
+ node = src;
+
+ qp = to_qcom_provider(node->provider);
+
+ INIT_LIST_HEAD(&commit_list);
+
+ for (i = 0; i < qp->num_bcms; i++) {
+ if (qp->bcms[i]->dirty) {
+ bcm_aggregate(qp->bcms[i]);
+ list_add_tail(&qp->bcms[i]->list, &commit_list);
+ }
+ }
+
+ /*
+ * Construct the command list from the BCM list, which is pre-ordered
+ * by VCD.
+ */
+ tcs_list_gen(&commit_list, cmds, commit_idx);
+
+ if (!commit_idx[0])
+ return ret;
+
+ ret = rpmh_invalidate(qp->dev);
+ if (ret) {
+ pr_err("Error invalidating RPMH client (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
+ cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending AMC RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int cmp_vcd(const void *_l, const void *_r)
+{
+ const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
+ const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;
+
+ if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
+ return -1;
+ else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
+ return 0;
+ else
+ return 1;
+}
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
+ qnodes[i]->name, node->id);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ /*
+ * Pre-sort the BCMs by VCD so that it is easy to generate a command
+ * list that groups BCMs with the same VCD together. VCDs are numbered
+ * with the lowest being the most expensive time-wise, ensuring that
+ * those commands are sent earliest in the queue.
+ */
+ sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);
+
+ platform_set_drvdata(pdev, qp);
+
+ dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");
+
+ return ret;
+err:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sdm845",
+ .of_match_table = qnoc_of_match,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver");
+MODULE_LICENSE("GPL v2");
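+
+/*
+ * Consumer-side sketch (the path lookup and bandwidth values below are
+ * illustrative only, assuming the interconnect core API of_icc_get(),
+ * icc_set_bw() and icc_put()): a client driver on sdm845 would request a
+ * path described by the "interconnects" property of its DT node and vote
+ * bandwidth on it roughly as follows:
+ *
+ *	path = of_icc_get(dev, NULL);
+ *	if (IS_ERR(path))
+ *		return PTR_ERR(path);
+ *
+ *	ret = icc_set_bw(path, avg_kbps, peak_kbps);
+ *	...
+ *	icc_put(path);
+ */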
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index bbec6ac0a966..3581abfb0c6a 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -569,6 +569,7 @@ cuda_interrupt(int irq, void *arg)
unsigned char ibuf[16];
int ibuf_len = 0;
int complete = 0;
+ bool full;
spin_lock_irqsave(&cuda_lock, flags);
@@ -656,12 +657,13 @@ idle_state:
break;
case reading:
- if (reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
- : ARRAY_FULL(cuda_rbuf, reply_ptr))
+ full = reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
+ : ARRAY_FULL(cuda_rbuf, reply_ptr);
+ if (full)
(void)in_8(&via[SR]);
else
*reply_ptr++ = in_8(&via[SR]);
- if (!TREQ_asserted(status)) {
+ if (!TREQ_asserted(status) || full) {
if (mcu_is_egret)
assert_TACK();
/* that's all folks */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f417b06e11c5..42ab8ec92a04 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -295,6 +295,17 @@ config QCOM_COINCELL
to maintain PMIC register and RTC state in the absence of
external power.
+config QCOM_FASTRPC
+ tristate "Qualcomm FastRPC"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ select DMA_SHARED_BUFFER
+ help
+ Provides a communication mechanism that allows clients to make
+ remote method invocations across a processor boundary to
+ applications running on the DSP processor. Say M if you want to
+ enable this module.
+
config SGI_GRU
tristate "SGI GRU driver"
depends on X86_UV && SMP
@@ -535,4 +546,5 @@ source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/cardreader/Kconfig"
+source "drivers/misc/habanalabs/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e39ccbbc1b3a..d5b7d3404dc7 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
+obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
@@ -59,3 +60,4 @@ obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic.o
+obj-$(CONFIG_HABANA_AI) += habanalabs/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index a0afadefcc49..1f6d008e0036 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -202,22 +202,20 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
return dpot_read_r8d8(dpot, ctrl);
case DPOT_UID(AD5272_ID):
case DPOT_UID(AD5274_ID):
- dpot_write_r8d8(dpot,
+ dpot_write_r8d8(dpot,
(DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
- value = dpot_read_r8d16(dpot,
- DPOT_AD5270_1_2_4_RDAC << 2);
-
- if (value < 0)
- return value;
- /*
- * AD5272/AD5274 returns high byte first, however
- * underling smbus expects low byte first.
- */
- value = swab16(value);
+ value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2);
+ if (value < 0)
+ return value;
+ /*
+ * AD5272/AD5274 return the high byte first; however, the
+ * underlying SMBus expects the low byte first.
+ */
+ value = swab16(value);
- if (dpot->uid == DPOT_UID(AD5274_ID))
- value = value >> 2;
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value >> 2;
return value;
default:
if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 024dcba8d6c8..5c98e2221889 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -170,35 +170,46 @@ static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
+ if (pcr->option.ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_PARTIAL_POWER_ON);
+
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x02);
+
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
/* To avoid too large in-rush current */
- udelay(150);
-
+ msleep(20);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_POWER_ON);
+
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x06);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
+ SD_OUTPUT_EN, SD_OUTPUT_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
+ MS_OUTPUT_EN, MS_OUTPUT_EN);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
{
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
- SD_POWER_MASK | PMOS_STRG_MASK,
- SD_POWER_OFF | PMOS_STRG_400mA);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, 0X00);
- return rtsx_pci_send_cmd(pcr, 100);
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK |
+ PMOS_STRG_MASK, SD_POWER_OFF | PMOS_STRG_400mA);
+ rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0X00);
+
+ return 0;
}
static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
@@ -348,6 +359,32 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
+static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x57E4);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x54A4);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5227_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
/* rts522a operations mainly derived from rts5227, except phy/hw init setting.
*/
static const struct pcr_ops rts522a_pcr_ops = {
@@ -360,7 +397,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
.disable_auto_blink = rts5227_disable_auto_blink,
.card_power_on = rts5227_card_power_on,
.card_power_off = rts5227_card_power_off,
- .switch_output_voltage = rts5227_switch_output_voltage,
+ .switch_output_voltage = rts522a_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
.force_power_down = rts5227_force_power_down,
@@ -371,4 +408,11 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
rts5227_init_params(pcr);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
+
+ pcr->option.ocp_en = 1;
+ if (pcr->option.ocp_en)
+ pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
+ pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
+ pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800;
+
}
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index dbe013abdb83..0f72a7e0fdab 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -284,6 +284,10 @@ static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
@@ -306,12 +310,15 @@ static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card)
{
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
- SD_POWER_MASK, SD_POWER_OFF);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, 0x00);
- return rtsx_pci_send_cmd(pcr, 100);
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK, SD_POWER_OFF);
+
+ rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x00);
+ return 0;
}
static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
@@ -629,6 +636,13 @@ void rts524a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts524a_pcr_ops;
+
+ pcr->option.ocp_en = 1;
+ if (pcr->option.ocp_en)
+ pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
+ pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
+ pcr->option.sd_800mA_ocp_thd = RTS524A_OCP_THD_800;
+
}
static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card)
@@ -737,4 +751,10 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
+
+ pcr->option.ocp_en = 1;
+ if (pcr->option.ocp_en)
+ pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
+ pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
+ pcr->option.sd_800mA_ocp_thd = RTS525A_OCP_THD_800;
}
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index a493b01c5bc6..da22bcb62b04 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -64,11 +64,13 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
drive_sel = pcr->sd30_drive_sel_1v8;
}
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
0xFF, driving[drive_sel][0]);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
@@ -193,7 +195,7 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
| SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
- CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
return 0;
@@ -207,22 +209,16 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
if (option->ocp_en)
rtsx_pci_enable_ocp(pcr);
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
- DV331812_VDD1, DV331812_VDD1);
- err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
- if (err < 0)
- return err;
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+ rtsx_pci_write_register(pcr, LDO_CONFIG2, DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
- LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
- DV331812_POWERON, DV331812_POWERON);
- err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_POW_SDVDD1_MASK,
+ LDO_POW_SDVDD1_ON);
+
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWERON);
msleep(20);
if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
@@ -242,8 +238,8 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
/* Reset SD_CFG3 register */
rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
- SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
- SD30_CLK_STOP_CFG0, 0);
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
@@ -273,9 +269,9 @@ static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
}
/* set pad drive */
- rtsx_pci_init_cmd(pcr);
rts5260_fill_driving(pcr, voltage);
- return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ return 0;
}
static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
@@ -290,13 +286,9 @@ static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;
-
rts5260_stop_cmd(pcr);
rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
- if (option->ocp_en)
- rtsx_pci_disable_ocp(pcr);
}
static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
@@ -304,13 +296,12 @@ static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
int err = 0;
rts5260_card_before_power_off(pcr);
-
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ err = rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ err = rtsx_pci_write_register(pcr, LDO_CONFIG2,
DV331812_POWERON, DV331812_POWEROFF);
- err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
return err;
}
@@ -322,41 +313,29 @@ static void rts5260_init_ocp(struct rtsx_pcr *pcr)
if (option->ocp_en) {
u8 mask, val;
- rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
- RTS5260_DVCC_OCP_EN |
- RTS5260_DVCC_OCP_CL_EN,
- RTS5260_DVCC_OCP_EN |
- RTS5260_DVCC_OCP_CL_EN);
- rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
- RTS5260_DVIO_OCP_EN |
- RTS5260_DVIO_OCP_CL_EN,
- RTS5260_DVIO_OCP_EN |
- RTS5260_DVIO_OCP_CL_EN);
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
- RTS5260_DVCC_OCP_THD_MASK,
- option->sd_400mA_ocp_thd);
-
- rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
- RTS5260_DVIO_OCP_THD_MASK,
- RTS5260_DVIO_OCP_THD_350);
+ RTS5260_DVCC_OCP_THD_MASK,
+ option->sd_800mA_ocp_thd);
rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
- RTS5260_DV331812_OCP_THD_MASK,
- RTS5260_DV331812_OCP_THD_210);
+ RTS5260_DV331812_OCP_THD_MASK,
+ RTS5260_DV331812_OCP_THD_270);
- mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ mask = SD_OCP_GLITCH_MASK;
val = pcr->hw_param.ocp_glitch;
rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
rtsx_pci_enable_ocp(pcr);
} else {
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN, 0);
- rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
- RTS5260_DVIO_OCP_EN |
- RTS5260_DVIO_OCP_CL_EN, 0);
}
}
@@ -364,14 +343,9 @@ static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = 0;
- rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
-
val = SD_OCP_INT_EN | SD_DETECT_EN;
- val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
- rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
- DV3318_DETECT_EN | DV3318_OCP_INT_EN,
- DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+
}
static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
@@ -379,15 +353,11 @@ static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
u8 mask = 0;
mask = SD_OCP_INT_EN | SD_DETECT_EN;
- mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
- rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
- DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
- rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
- OC_POWER_DOWN);
}
+
static int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
@@ -404,9 +374,7 @@ static void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
u8 val = 0;
mask = SD_OCP_INT_CLR | SD_OC_CLR;
- mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
val = SD_OCP_INT_CLR | SD_OC_CLR;
- val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
@@ -425,36 +393,22 @@ static void rts5260_process_ocp(struct rtsx_pcr *pcr)
rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
- if (pcr->card_exist & SD_EXIST)
- rtsx_sd_power_off_card3v3(pcr);
- else if (pcr->card_exist & MS_EXIST)
- rtsx_ms_power_off_card3v3(pcr);
-
- if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
- if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
- SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
- (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
- rtsx_pci_clear_ocpstat(pcr);
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rtsx_pci_clear_ocpstat(pcr);
pcr->ocp_stat = 0;
pcr->ocp_stat2 = 0;
}
- if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
- SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
- (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
- if (pcr->card_exist & SD_EXIST)
- rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
- else if (pcr->card_exist & MS_EXIST)
- rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
- }
}
static int rts5260_init_hw(struct rtsx_pcr *pcr)
{
int err;
- rtsx_pci_init_ocp(pcr);
-
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
@@ -483,6 +437,8 @@ static int rts5260_init_hw(struct rtsx_pcr *pcr)
if (err < 0)
return err;
+ rtsx_pci_init_ocp(pcr);
+
return 0;
}
@@ -499,7 +455,13 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
pcr_dbg(pcr, "Set parameters for L1.2.");
rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
0xFF, PCIE_L1_2_EN);
- rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
+
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
0xFF, PCIE_L1_2_PD_FE_EN);
} else if (lss_l1_1) {
pcr_dbg(pcr, "Set parameters for L1.1.");
@@ -742,7 +704,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
option->ocp_en = 1;
if (option->ocp_en)
hw_param->interrupt_en |= SD_OC_INT_EN;
- hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+ hw_param->ocp_glitch = SDVIO_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
}
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index da445223f4cc..0d320e0ab4c9 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -703,7 +703,10 @@ EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
- pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
+ | hw_param->interrupt_en;
if (pcr->num_slots > 1)
pcr->bier |= MS_INT_EN;
@@ -969,8 +972,19 @@ static void rtsx_pci_card_detect(struct work_struct *work)
static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
- if (pcr->ops->process_ocp)
+ if (pcr->ops->process_ocp) {
pcr->ops->process_ocp(pcr);
+ } else {
+ if (!pcr->option.ocp_en)
+ return;
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rtsx_pci_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ }
+ }
}
static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
@@ -1039,7 +1053,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
}
- if (pcr->card_inserted || pcr->card_removed)
+ if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
schedule_delayed_work(&pcr->carddet_work,
msecs_to_jiffies(200));
@@ -1144,10 +1158,12 @@ void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
- if (pcr->ops->enable_ocp)
+ if (pcr->ops->enable_ocp) {
pcr->ops->enable_ocp(pcr);
- else
+ } else {
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ }
}
@@ -1155,10 +1171,13 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
- if (pcr->ops->disable_ocp)
+ if (pcr->ops->disable_ocp) {
pcr->ops->disable_ocp(pcr);
- else
+ } else {
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+ }
}
void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
@@ -1169,7 +1188,7 @@ void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
struct rtsx_cr_option *option = &(pcr->option);
if (option->ocp_en) {
- u8 val = option->sd_400mA_ocp_thd;
+ u8 val = option->sd_800mA_ocp_thd;
rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
rtsx_pci_write_register(pcr, REG_OCPPARA1,
@@ -1204,6 +1223,7 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ udelay(100);
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
}
}
@@ -1213,7 +1233,6 @@ int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
MS_CLK_EN | SD40_CLK_EN, 0);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
-
rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
msleep(50);
@@ -1313,6 +1332,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
break;
}
+ /* init OCP */
+ rtsx_pci_init_ocp(pcr);
+
/* Enable clk_request_n to enable clock power management */
rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
/* Enter L1 when host tx idle */
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 6ea1655db0bb..300fc31d8e67 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -46,6 +46,11 @@
#define SSC_CLOCK_STABLE_WAIT 130
+#define RTS524A_OCP_THD_800 0x04
+#define RTS525A_OCP_THD_800 0x05
+#define RTS522A_OCP_THD_800 0x06
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 5a17bfeb80d3..74d4fda6c4a7 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -125,9 +125,7 @@ enclosure_register(struct device *dev, const char *name, int components,
struct enclosure_component_callbacks *cb)
{
struct enclosure_device *edev =
- kzalloc(sizeof(struct enclosure_device) +
- sizeof(struct enclosure_component)*components,
- GFP_KERNEL);
+ kzalloc(struct_size(edev, component, components), GFP_KERNEL);
int err, i;
BUG_ON(!cb);
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
new file mode 100644
index 000000000000..39f832d27288
--- /dev/null
+++ b/drivers/misc/fastrpc.c
@@ -0,0 +1,1401 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rpmsg.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <uapi/misc/fastrpc.h>
+
+#define ADSP_DOMAIN_ID (0)
+#define MDSP_DOMAIN_ID (1)
+#define SDSP_DOMAIN_ID (2)
+#define CDSP_DOMAIN_ID (3)
+#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, sdsp, cdsp */
+#define FASTRPC_MAX_SESSIONS 9 /* 8 compute, 1 cpz */
+#define FASTRPC_ALIGN 128
+#define FASTRPC_MAX_FDLIST 16
+#define FASTRPC_MAX_CRCLIST 64
+#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
+#define FASTRPC_CTX_MAX (256)
+#define FASTRPC_INIT_HANDLE 1
+#define FASTRPC_CTXID_MASK (0xFF0)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
+#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
+#define FASTRPC_DEVICE_NAME "fastrpc"
+
+/* Retrieves number of input buffers from the scalars parameter */
+#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
+
+/* Retrieves number of output buffers from the scalars parameter */
+#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
+
+/* Retrieves number of input handles from the scalars parameter */
+#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
+
+/* Retrieves number of output handles from the scalars parameter */
+#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
+
+#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
+ REMOTE_SCALARS_OUTBUFS(sc) + \
+ REMOTE_SCALARS_INHANDLES(sc)+ \
+ REMOTE_SCALARS_OUTHANDLES(sc))
+#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
+ (((attr & 0x07) << 29) | \
+ ((method & 0x1f) << 24) | \
+ ((in & 0xff) << 16) | \
+ ((out & 0xff) << 8) | \
+ ((oin & 0x0f) << 4) | \
+ (oout & 0x0f))
+
+#define FASTRPC_SCALARS(method, in, out) \
+ FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
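+
+/*
+ * Example: FASTRPC_SCALARS(2, 1, 1) packs method 2 with one input and one
+ * output buffer into sc = 0x02010100; REMOTE_SCALARS_INBUFS() and
+ * REMOTE_SCALARS_OUTBUFS() each recover 1 from that value and
+ * REMOTE_SCALARS_LENGTH() yields 2.
+ */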
+
+#define FASTRPC_CREATE_PROCESS_NARGS 6
+/* Remote Method id table */
+#define FASTRPC_RMID_INIT_ATTACH 0
+#define FASTRPC_RMID_INIT_RELEASE 1
+#define FASTRPC_RMID_INIT_CREATE 6
+#define FASTRPC_RMID_INIT_CREATE_ATTR 7
+#define FASTRPC_RMID_INIT_CREATE_STATIC 8
+
+#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
+
+static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
+ "sdsp", "cdsp"};
+struct fastrpc_phy_page {
+ u64 addr; /* physical address */
+ u64 size; /* size of contiguous region */
+};
+
+struct fastrpc_invoke_buf {
+ u32 num; /* number of contiguous regions */
+ u32 pgidx; /* index to start of contiguous region */
+};
+
+struct fastrpc_remote_arg {
+ u64 pv;
+ u64 len;
+};
+
+struct fastrpc_msg {
+ int pid; /* process group id */
+ int tid; /* thread id */
+ u64 ctx; /* invoke caller context */
+ u32 handle; /* handle to invoke */
+ u32 sc; /* scalars structure describing the data */
+ u64 addr; /* physical address */
+ u64 size; /* size of contiguous region */
+};
+
+struct fastrpc_invoke_rsp {
+ u64 ctx; /* invoke caller context */
+ int retval; /* invoke return value */
+};
+
+struct fastrpc_buf {
+ struct fastrpc_user *fl;
+ struct dma_buf *dmabuf;
+ struct device *dev;
+ void *virt;
+ u64 phys;
+ u64 size;
+ /* Lock for dma buf attachments */
+ struct mutex lock;
+ struct list_head attachments;
+};
+
+struct fastrpc_dma_buf_attachment {
+ struct device *dev;
+ struct sg_table sgt;
+ struct list_head node;
+};
+
+struct fastrpc_map {
+ struct list_head node;
+ struct fastrpc_user *fl;
+ int fd;
+ struct dma_buf *buf;
+ struct sg_table *table;
+ struct dma_buf_attachment *attach;
+ u64 phys;
+ u64 size;
+ void *va;
+ u64 len;
+ struct kref refcount;
+};
+
+struct fastrpc_invoke_ctx {
+ int nscalars;
+ int nbufs;
+ int retval;
+ int pid;
+ int tgid;
+ u32 sc;
+ u32 *crc;
+ u64 ctxid;
+ u64 msg_sz;
+ struct kref refcount;
+ struct list_head node; /* list of ctxs */
+ struct completion work;
+ struct fastrpc_msg msg;
+ struct fastrpc_user *fl;
+ struct fastrpc_remote_arg *rpra;
+ struct fastrpc_map **maps;
+ struct fastrpc_buf *buf;
+ struct fastrpc_invoke_args *args;
+ struct fastrpc_channel_ctx *cctx;
+};
+
+struct fastrpc_session_ctx {
+ struct device *dev;
+ int sid;
+ bool used;
+ bool valid;
+};
+
+struct fastrpc_channel_ctx {
+ int domain_id;
+ int sesscount;
+ struct rpmsg_device *rpdev;
+ struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
+ spinlock_t lock;
+ struct idr ctx_idr;
+ struct list_head users;
+ struct miscdevice miscdev;
+};
+
+struct fastrpc_user {
+ struct list_head user;
+ struct list_head maps;
+ struct list_head pending;
+
+ struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_session_ctx *sctx;
+ struct fastrpc_buf *init_mem;
+
+ int tgid;
+ int pd;
+ /* Lock for lists */
+ spinlock_t lock;
+ /* lock for allocations */
+ struct mutex mutex;
+};
+
+static void fastrpc_free_map(struct kref *ref)
+{
+ struct fastrpc_map *map;
+
+ map = container_of(ref, struct fastrpc_map, refcount);
+
+ if (map->table) {
+ dma_buf_unmap_attachment(map->attach, map->table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(map->buf, map->attach);
+ dma_buf_put(map->buf);
+ }
+
+ kfree(map);
+}
+
+static void fastrpc_map_put(struct fastrpc_map *map)
+{
+ if (map)
+ kref_put(&map->refcount, fastrpc_free_map);
+}
+
+static void fastrpc_map_get(struct fastrpc_map *map)
+{
+ if (map)
+ kref_get(&map->refcount);
+}
+
+static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
+ struct fastrpc_map **ppmap)
+{
+ struct fastrpc_map *map = NULL;
+
+ mutex_lock(&fl->mutex);
+ list_for_each_entry(map, &fl->maps, node) {
+ if (map->fd == fd) {
+ fastrpc_map_get(map);
+ *ppmap = map;
+ mutex_unlock(&fl->mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&fl->mutex);
+
+ return -ENOENT;
+}
+
+static void fastrpc_buf_free(struct fastrpc_buf *buf)
+{
+ dma_free_coherent(buf->dev, buf->size, buf->virt,
+ FASTRPC_PHYS(buf->phys));
+ kfree(buf);
+}
+
+static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
+ u64 size, struct fastrpc_buf **obuf)
+{
+ struct fastrpc_buf *buf;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&buf->attachments);
+ mutex_init(&buf->lock);
+
+ buf->fl = fl;
+ buf->virt = NULL;
+ buf->phys = 0;
+ buf->size = size;
+ buf->dev = dev;
+
+ buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
+ GFP_KERNEL);
+ if (!buf->virt) {
+ mutex_destroy(&buf->lock);
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ if (fl->sctx && fl->sctx->sid)
+ buf->phys += ((u64)fl->sctx->sid << 32);
+
+ *obuf = buf;
+
+ return 0;
+}
+
+static void fastrpc_context_free(struct kref *ref)
+{
+ struct fastrpc_invoke_ctx *ctx;
+ struct fastrpc_channel_ctx *cctx;
+ int i;
+
+ ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
+ cctx = ctx->cctx;
+
+ for (i = 0; i < ctx->nscalars; i++)
+ fastrpc_map_put(ctx->maps[i]);
+
+ if (ctx->buf)
+ fastrpc_buf_free(ctx->buf);
+
+ spin_lock(&cctx->lock);
+ idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
+ spin_unlock(&cctx->lock);
+
+ kfree(ctx->maps);
+ kfree(ctx);
+}
+
+static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
+{
+ kref_get(&ctx->refcount);
+}
+
+static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
+{
+ kref_put(&ctx->refcount, fastrpc_context_free);
+}
+
+static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
+ struct fastrpc_user *user, u32 kernel, u32 sc,
+ struct fastrpc_invoke_args *args)
+{
+ struct fastrpc_channel_ctx *cctx = user->cctx;
+ struct fastrpc_invoke_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ctx->node);
+ ctx->fl = user;
+ ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
+ ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
+ REMOTE_SCALARS_OUTBUFS(sc);
+
+ if (ctx->nscalars) {
+ ctx->maps = kcalloc(ctx->nscalars,
+ sizeof(*ctx->maps), GFP_KERNEL);
+ if (!ctx->maps) {
+ kfree(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ ctx->args = args;
+ }
+
+ ctx->sc = sc;
+ ctx->retval = -1;
+ ctx->pid = current->pid;
+ ctx->tgid = user->tgid;
+ ctx->cctx = cctx;
+ init_completion(&ctx->work);
+
+ spin_lock(&user->lock);
+ list_add_tail(&ctx->node, &user->pending);
+ spin_unlock(&user->lock);
+
+ spin_lock(&cctx->lock);
+ ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
+ FASTRPC_CTX_MAX, GFP_ATOMIC);
+ if (ret < 0) {
+ spin_unlock(&cctx->lock);
+ goto err_idr;
+ }
+ ctx->ctxid = ret << 4;
+ spin_unlock(&cctx->lock);
+
+ kref_init(&ctx->refcount);
+
+ return ctx;
+err_idr:
+ spin_lock(&user->lock);
+ list_del(&ctx->node);
+ spin_unlock(&user->lock);
+ kfree(ctx->maps);
+ kfree(ctx);
+
+ return ERR_PTR(ret);
+}
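+
+/*
+ * Example: an idr id of 5 becomes ctxid 0x50. The low four bits are left
+ * free to carry the pd value when the message is sent (msg->ctx =
+ * ctx->ctxid | fl->pd), and the response handler recovers the idr id with
+ * (rsp->ctx & FASTRPC_CTXID_MASK) >> 4.
+ */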
+
+static struct sg_table *
+fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct fastrpc_dma_buf_attachment *a = attachment->priv;
+ struct sg_table *table;
+
+ table = &a->sgt;
+
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+}
+
+static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *table,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
+}
+
+static void fastrpc_release(struct dma_buf *dmabuf)
+{
+ struct fastrpc_buf *buffer = dmabuf->priv;
+
+ fastrpc_buf_free(buffer);
+}
+
+static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct fastrpc_dma_buf_attachment *a;
+ struct fastrpc_buf *buffer = dmabuf->priv;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
+ FASTRPC_PHYS(buffer->phys), buffer->size);
+ if (ret < 0) {
+ dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+ kfree(a);
+ return -EINVAL;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->node);
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->node, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct fastrpc_dma_buf_attachment *a = attachment->priv;
+ struct fastrpc_buf *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->node);
+ mutex_unlock(&buffer->lock);
+ kfree(a);
+}
+
+static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+ struct fastrpc_buf *buf = dmabuf->priv;
+
+ return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *fastrpc_vmap(struct dma_buf *dmabuf)
+{
+ struct fastrpc_buf *buf = dmabuf->priv;
+
+ return buf->virt;
+}
+
+static int fastrpc_mmap(struct dma_buf *dmabuf,
+ struct vm_area_struct *vma)
+{
+ struct fastrpc_buf *buf = dmabuf->priv;
+ size_t size = vma->vm_end - vma->vm_start;
+
+ return dma_mmap_coherent(buf->dev, vma, buf->virt,
+ FASTRPC_PHYS(buf->phys), size);
+}
+
+static const struct dma_buf_ops fastrpc_dma_buf_ops = {
+ .attach = fastrpc_dma_buf_attach,
+ .detach = fastrpc_dma_buf_detatch,
+ .map_dma_buf = fastrpc_map_dma_buf,
+ .unmap_dma_buf = fastrpc_unmap_dma_buf,
+ .mmap = fastrpc_mmap,
+ .map = fastrpc_kmap,
+ .vmap = fastrpc_vmap,
+ .release = fastrpc_release,
+};
+
+static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ u64 len, struct fastrpc_map **ppmap)
+{
+ struct fastrpc_session_ctx *sess = fl->sctx;
+ struct fastrpc_map *map = NULL;
+ int err = 0;
+
+ if (!fastrpc_map_find(fl, fd, ppmap))
+ return 0;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&map->node);
+ map->fl = fl;
+ map->fd = fd;
+ map->buf = dma_buf_get(fd);
+ if (IS_ERR(map->buf)) {
+ err = PTR_ERR(map->buf);
+ goto get_err;
+ }
+
+ map->attach = dma_buf_attach(map->buf, sess->dev);
+ if (IS_ERR(map->attach)) {
+ dev_err(sess->dev, "Failed to attach dmabuf\n");
+ err = PTR_ERR(map->attach);
+ goto attach_err;
+ }
+
+ map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(map->table)) {
+ err = PTR_ERR(map->table);
+ goto map_err;
+ }
+
+ map->phys = sg_dma_address(map->table->sgl);
+ map->phys += ((u64)fl->sctx->sid << 32);
+ map->size = len;
+ map->va = sg_virt(map->table->sgl);
+ map->len = len;
+ kref_init(&map->refcount);
+
+ spin_lock(&fl->lock);
+ list_add_tail(&map->node, &fl->maps);
+ spin_unlock(&fl->lock);
+ *ppmap = map;
+
+ return 0;
+
+map_err:
+ dma_buf_detach(map->buf, map->attach);
+attach_err:
+ dma_buf_put(map->buf);
+get_err:
+ kfree(map);
+
+ return err;
+}
+
+/*
+ * Fastrpc payload buffer with metadata looks like:
+ *
+ * >>>>>> START of METADATA <<<<<<<<<
+ * +---------------------------------+
+ * | Arguments |
+ * | type:(struct fastrpc_remote_arg)|
+ * | (0 - N) |
+ * +---------------------------------+
+ * | Invoke Buffer list |
+ * | type:(struct fastrpc_invoke_buf)|
+ * | (0 - N) |
+ * +---------------------------------+
+ * | Page info list |
+ * | type:(struct fastrpc_phy_page) |
+ * | (0 - N) |
+ * +---------------------------------+
+ * | Optional info |
+ * |(can be specific to SoC/Firmware)|
+ * +---------------------------------+
+ * >>>>>>>> END of METADATA <<<<<<<<<
+ * +---------------------------------+
+ * | Inline ARGS |
+ * | (0-N) |
+ * +---------------------------------+
+ */
+
+static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
+{
+ int size = 0;
+
+ size = (sizeof(struct fastrpc_remote_arg) +
+ sizeof(struct fastrpc_invoke_buf) +
+ sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
+ sizeof(u64) * FASTRPC_MAX_FDLIST +
+ sizeof(u32) * FASTRPC_MAX_CRCLIST;
+
+ return size;
+}
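+
+/*
+ * Example: with two buffer arguments the metadata is 2 * (16 + 8 + 16)
+ * bytes of per-argument records plus 128 bytes of fd list and 256 bytes of
+ * crc list, i.e. 464 bytes. fastrpc_get_payload_size() below aligns this
+ * up to the next FASTRPC_ALIGN (128 byte) boundary, 512, before adding the
+ * inline argument lengths.
+ */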
+
+static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
+{
+ u64 size = 0;
+ int i;
+
+ size = ALIGN(metalen, FASTRPC_ALIGN);
+ for (i = 0; i < ctx->nscalars; i++) {
+ if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
+ size = ALIGN(size, FASTRPC_ALIGN);
+ size += ctx->args[i].length;
+ }
+ }
+
+ return size;
+}
+
+static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
+{
+ struct device *dev = ctx->fl->sctx->dev;
+ int i, err;
+
+ for (i = 0; i < ctx->nscalars; ++i) {
+ /* Make sure reserved field is set to 0 */
+ if (ctx->args[i].reserved)
+ return -EINVAL;
+
+ if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
+ ctx->args[i].length == 0)
+ continue;
+
+ err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
+ ctx->args[i].length, &ctx->maps[i]);
+ if (err) {
+ dev_err(dev, "Error Creating map %d\n", err);
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
+static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+{
+ struct device *dev = ctx->fl->sctx->dev;
+ struct fastrpc_remote_arg *rpra;
+ struct fastrpc_invoke_buf *list;
+ struct fastrpc_phy_page *pages;
+ int inbufs, i, err = 0;
+ u64 rlen, pkt_size;
+ uintptr_t args;
+ int metalen;
+
+ inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+ metalen = fastrpc_get_meta_size(ctx);
+ pkt_size = fastrpc_get_payload_size(ctx, metalen);
+
+ err = fastrpc_create_maps(ctx);
+ if (err)
+ return err;
+
+ ctx->msg_sz = pkt_size;
+
+ err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
+ if (err)
+ return err;
+
+ rpra = ctx->buf->virt;
+ list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
+ pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
+ sizeof(*rpra));
+ args = (uintptr_t)ctx->buf->virt + metalen;
+ rlen = pkt_size - metalen;
+ ctx->rpra = rpra;
+
+ for (i = 0; i < ctx->nbufs; ++i) {
+ u64 len = ctx->args[i].length;
+
+ rpra[i].pv = 0;
+ rpra[i].len = len;
+ list[i].num = len ? 1 : 0;
+ list[i].pgidx = i;
+
+ if (!len)
+ continue;
+
+ pages[i].size = roundup(len, PAGE_SIZE);
+
+ if (ctx->maps[i]) {
+ rpra[i].pv = (u64) ctx->args[i].ptr;
+ pages[i].addr = ctx->maps[i]->phys;
+ } else {
+ rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
+ args = ALIGN(args, FASTRPC_ALIGN);
+ if (rlen < len)
+ goto bail;
+
+ rpra[i].pv = args;
+ pages[i].addr = ctx->buf->phys + (pkt_size - rlen);
+ pages[i].addr = pages[i].addr & PAGE_MASK;
+ args = args + len;
+ rlen -= len;
+ }
+
+ if (i < inbufs && !ctx->maps[i]) {
+ void *dst = (void *)(uintptr_t)rpra[i].pv;
+ void *src = (void *)(uintptr_t)ctx->args[i].ptr;
+
+ if (!kernel) {
+ if (copy_from_user(dst, (void __user *)src,
+ len)) {
+ err = -EFAULT;
+ goto bail;
+ }
+ } else {
+ memcpy(dst, src, len);
+ }
+ }
+ }
+
+ for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
+ rpra[i].pv = (u64) ctx->args[i].ptr;
+ rpra[i].len = ctx->args[i].length;
+ list[i].num = ctx->args[i].length ? 1 : 0;
+ list[i].pgidx = i;
+ pages[i].addr = ctx->maps[i]->phys;
+ pages[i].size = ctx->maps[i]->size;
+ }
+
+bail:
+ if (err)
+ dev_err(dev, "Error: get invoke args failed:%d\n", err);
+
+ return err;
+}
+
+static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
+ u32 kernel)
+{
+ struct fastrpc_remote_arg *rpra = ctx->rpra;
+ int i, inbufs;
+
+ inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+
+ for (i = inbufs; i < ctx->nbufs; ++i) {
+ void *src = (void *)(uintptr_t)rpra[i].pv;
+ void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
+ u64 len = rpra[i].len;
+
+ if (!kernel) {
+ if (copy_to_user((void __user *)dst, src, len))
+ return -EFAULT;
+ } else {
+ memcpy(dst, src, len);
+ }
+ }
+
+ return 0;
+}
+
+static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
+ struct fastrpc_invoke_ctx *ctx,
+ u32 kernel, uint32_t handle)
+{
+ struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_user *fl = ctx->fl;
+ struct fastrpc_msg *msg = &ctx->msg;
+
+ cctx = fl->cctx;
+ msg->pid = fl->tgid;
+ msg->tid = current->pid;
+
+ if (kernel)
+ msg->pid = 0;
+
+ msg->ctx = ctx->ctxid | fl->pd;
+ msg->handle = handle;
+ msg->sc = ctx->sc;
+ msg->addr = ctx->buf ? ctx->buf->phys : 0;
+ msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
+ fastrpc_context_get(ctx);
+
+ return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
+}
+
+static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
+ u32 handle, u32 sc,
+ struct fastrpc_invoke_args *args)
+{
+ struct fastrpc_invoke_ctx *ctx = NULL;
+ int err = 0;
+
+ if (!fl->sctx)
+ return -EINVAL;
+
+ ctx = fastrpc_context_alloc(fl, kernel, sc, args);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ if (ctx->nscalars) {
+ err = fastrpc_get_args(kernel, ctx);
+ if (err)
+ goto bail;
+ }
+ /* Send invoke buffer to remote dsp */
+ err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
+ if (err)
+ goto bail;
+
+ /* Wait for remote dsp to respond or time out */
+ err = wait_for_completion_interruptible(&ctx->work);
+ if (err)
+ goto bail;
+
+ /* Check the response from remote dsp */
+ err = ctx->retval;
+ if (err)
+ goto bail;
+
+ if (ctx->nscalars) {
+ /* populate all the output buffers with results */
+ err = fastrpc_put_args(ctx, kernel);
+ if (err)
+ goto bail;
+ }
+
+bail:
+ /* We are done with this compute context, remove it from pending list */
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+
+ if (err)
+ dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
+
+ return err;
+}
+
+static int fastrpc_init_create_process(struct fastrpc_user *fl,
+ char __user *argp)
+{
+ struct fastrpc_init_create init;
+ struct fastrpc_invoke_args *args;
+ struct fastrpc_phy_page pages[1];
+ struct fastrpc_map *map = NULL;
+ struct fastrpc_buf *imem = NULL;
+ int memlen;
+ int err;
+ struct {
+ int pgid;
+ u32 namelen;
+ u32 filelen;
+ u32 pageslen;
+ u32 attrs;
+ u32 siglen;
+ } inbuf;
+ u32 sc;
+
+ args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ if (copy_from_user(&init, argp, sizeof(init))) {
+ err = -EFAULT;
+ goto bail;
+ }
+
+ if (init.filelen > INIT_FILELEN_MAX) {
+ err = -EINVAL;
+ goto bail;
+ }
+
+ inbuf.pgid = fl->tgid;
+ inbuf.namelen = strlen(current->comm) + 1;
+ inbuf.filelen = init.filelen;
+ inbuf.pageslen = 1;
+ inbuf.attrs = init.attrs;
+ inbuf.siglen = init.siglen;
+ fl->pd = 1;
+
+ if (init.filelen && init.filefd) {
+ err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
+ if (err)
+ goto bail;
+ }
+
+ memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
+ 1024 * 1024);
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
+ &imem);
+ if (err) {
+ fastrpc_map_put(map);
+ goto bail;
+ }
+
+ fl->init_mem = imem;
+ args[0].ptr = (u64)(uintptr_t)&inbuf;
+ args[0].length = sizeof(inbuf);
+ args[0].fd = -1;
+
+ args[1].ptr = (u64)(uintptr_t)current->comm;
+ args[1].length = inbuf.namelen;
+ args[1].fd = -1;
+
+ args[2].ptr = (u64) init.file;
+ args[2].length = inbuf.filelen;
+ args[2].fd = init.filefd;
+
+ pages[0].addr = imem->phys;
+ pages[0].size = imem->size;
+
+ args[3].ptr = (u64)(uintptr_t) pages;
+ args[3].length = 1 * sizeof(*pages);
+ args[3].fd = -1;
+
+ args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
+ args[4].length = sizeof(inbuf.attrs);
+ args[4].fd = -1;
+
+ args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
+ args[5].length = sizeof(inbuf.siglen);
+ args[5].fd = -1;
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
+ if (init.attrs)
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
+
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ sc, args);
+
+ if (err) {
+ fastrpc_map_put(map);
+ fastrpc_buf_free(imem);
+ }
+
+bail:
+ kfree(args);
+
+ return err;
+}
+
+static struct fastrpc_session_ctx *fastrpc_session_alloc(
+ struct fastrpc_channel_ctx *cctx)
+{
+ struct fastrpc_session_ctx *session = NULL;
+ int i;
+
+ spin_lock(&cctx->lock);
+ for (i = 0; i < cctx->sesscount; i++) {
+ if (!cctx->session[i].used && cctx->session[i].valid) {
+ cctx->session[i].used = true;
+ session = &cctx->session[i];
+ break;
+ }
+ }
+ spin_unlock(&cctx->lock);
+
+ return session;
+}
+
+static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
+ struct fastrpc_session_ctx *session)
+{
+ spin_lock(&cctx->lock);
+ session->used = false;
+ spin_unlock(&cctx->lock);
+}
+
+static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
+{
+ struct fastrpc_invoke_args args[1];
+ int tgid = 0;
+ u32 sc;
+
+ tgid = fl->tgid;
+ args[0].ptr = (u64)(uintptr_t) &tgid;
+ args[0].length = sizeof(tgid);
+ args[0].fd = -1;
+ args[0].reserved = 0;
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
+
+ return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ sc, &args[0]);
+}
+
+static int fastrpc_device_release(struct inode *inode, struct file *file)
+{
+ struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
+ struct fastrpc_channel_ctx *cctx = fl->cctx;
+ struct fastrpc_invoke_ctx *ctx, *n;
+ struct fastrpc_map *map, *m;
+
+ fastrpc_release_current_dsp_process(fl);
+
+ spin_lock(&cctx->lock);
+ list_del(&fl->user);
+ spin_unlock(&cctx->lock);
+
+ if (fl->init_mem)
+ fastrpc_buf_free(fl->init_mem);
+
+ list_for_each_entry_safe(ctx, n, &fl->pending, node) {
+ list_del(&ctx->node);
+ fastrpc_context_put(ctx);
+ }
+
+ list_for_each_entry_safe(map, m, &fl->maps, node) {
+ list_del(&map->node);
+ fastrpc_map_put(map);
+ }
+
+ fastrpc_session_free(cctx, fl->sctx);
+
+ mutex_destroy(&fl->mutex);
+ kfree(fl);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static int fastrpc_device_open(struct inode *inode, struct file *filp)
+{
+ struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
+ struct fastrpc_user *fl = NULL;
+
+ fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ filp->private_data = fl;
+ spin_lock_init(&fl->lock);
+ mutex_init(&fl->mutex);
+ INIT_LIST_HEAD(&fl->pending);
+ INIT_LIST_HEAD(&fl->maps);
+ INIT_LIST_HEAD(&fl->user);
+ fl->tgid = current->tgid;
+ fl->cctx = cctx;
+
+ fl->sctx = fastrpc_session_alloc(cctx);
+ if (!fl->sctx) {
+ dev_err(&cctx->rpdev->dev, "No session available\n");
+ mutex_destroy(&fl->mutex);
+ kfree(fl);
+
+ return -EBUSY;
+ }
+
+ spin_lock(&cctx->lock);
+ list_add_tail(&fl->user, &cctx->users);
+ spin_unlock(&cctx->lock);
+
+ return 0;
+}
+
+static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
+{
+ struct dma_buf *buf;
+ int info;
+
+ if (copy_from_user(&info, argp, sizeof(info)))
+ return -EFAULT;
+
+ buf = dma_buf_get(info);
+ if (IS_ERR_OR_NULL(buf))
+ return -EINVAL;
+ /*
+ * One put drops the reference taken by the dma_buf_get() above, the
+ * other drops the reference taken by the ALLOC_DMA_BUFF ioctl.
+ */
+ dma_buf_put(buf);
+ dma_buf_put(buf);
+
+ return 0;
+}
+
+static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_alloc_dma_buf bp;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct fastrpc_buf *buf = NULL;
+ int err;
+
+ if (copy_from_user(&bp, argp, sizeof(bp)))
+ return -EFAULT;
+
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
+ if (err)
+ return err;
+ exp_info.ops = &fastrpc_dma_buf_ops;
+ exp_info.size = bp.size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buf;
+ buf->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(buf->dmabuf)) {
+ err = PTR_ERR(buf->dmabuf);
+ fastrpc_buf_free(buf);
+ return err;
+ }
+
+ bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
+ if (bp.fd < 0) {
+ dma_buf_put(buf->dmabuf);
+ return -EINVAL;
+ }
+
+ if (copy_to_user(argp, &bp, sizeof(bp))) {
+ dma_buf_put(buf->dmabuf);
+ return -EFAULT;
+ }
+
+ get_dma_buf(buf->dmabuf);
+
+ return 0;
+}
+
+static int fastrpc_init_attach(struct fastrpc_user *fl)
+{
+ struct fastrpc_invoke_args args[1];
+ int tgid = fl->tgid;
+ u32 sc;
+
+ args[0].ptr = (u64)(uintptr_t) &tgid;
+ args[0].length = sizeof(tgid);
+ args[0].fd = -1;
+ args[0].reserved = 0;
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
+ fl->pd = 0;
+
+ return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ sc, &args[0]);
+}
+
+static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_invoke_args *args = NULL;
+ struct fastrpc_invoke inv;
+ u32 nscalars;
+ int err;
+
+ if (copy_from_user(&inv, argp, sizeof(inv)))
+ return -EFAULT;
+
+ /* nscalars is truncated here to max supported value */
+ nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
+ if (nscalars) {
+ args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
+ nscalars * sizeof(*args))) {
+ kfree(args);
+ return -EFAULT;
+ }
+ }
+
+ err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
+ kfree(args);
+
+ return err;
+}
+
+static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
+ char __user *argp = (char __user *)arg;
+ int err;
+
+ switch (cmd) {
+ case FASTRPC_IOCTL_INVOKE:
+ err = fastrpc_invoke(fl, argp);
+ break;
+ case FASTRPC_IOCTL_INIT_ATTACH:
+ err = fastrpc_init_attach(fl);
+ break;
+ case FASTRPC_IOCTL_INIT_CREATE:
+ err = fastrpc_init_create_process(fl, argp);
+ break;
+ case FASTRPC_IOCTL_FREE_DMA_BUFF:
+ err = fastrpc_dmabuf_free(fl, argp);
+ break;
+ case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
+ err = fastrpc_dmabuf_alloc(fl, argp);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
+
+static const struct file_operations fastrpc_fops = {
+ .open = fastrpc_device_open,
+ .release = fastrpc_device_release,
+ .unlocked_ioctl = fastrpc_device_ioctl,
+ .compat_ioctl = fastrpc_device_ioctl,
+};
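+
+/*
+ * Userspace sketch (the device node name follows the "fastrpc-%s" pattern
+ * registered in the rpmsg probe below; handle, scalars and args values are
+ * illustrative only):
+ *
+ *	int fd = open("/dev/fastrpc-adsp", O_RDWR);
+ *	struct fastrpc_invoke inv = {
+ *		.handle = handle,
+ *		.sc = 0x02010100,
+ *		.args = (__u64)(uintptr_t)args,
+ *	};
+ *
+ *	ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH);
+ *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
+ */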
+
+static int fastrpc_cb_probe(struct platform_device *pdev)
+{
+ struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_session_ctx *sess;
+ struct device *dev = &pdev->dev;
+ int i, sessions = 0;
+
+ cctx = dev_get_drvdata(dev->parent);
+ if (!cctx)
+ return -EINVAL;
+
+ of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
+
+ spin_lock(&cctx->lock);
+ sess = &cctx->session[cctx->sesscount];
+ sess->used = false;
+ sess->valid = true;
+ sess->dev = dev;
+ dev_set_drvdata(dev, sess);
+
+ if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
+ dev_info(dev, "FastRPC Session ID not specified in DT\n");
+
+ if (sessions > 0) {
+ struct fastrpc_session_ctx *dup_sess;
+
+ for (i = 1; i < sessions; i++) {
+ if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ break;
+ dup_sess = &cctx->session[cctx->sesscount];
+ memcpy(dup_sess, sess, sizeof(*dup_sess));
+ }
+ }
+ cctx->sesscount++;
+ spin_unlock(&cctx->lock);
+ dma_set_mask(dev, DMA_BIT_MASK(32));
+
+ return 0;
+}
+
+static int fastrpc_cb_remove(struct platform_device *pdev)
+{
+ struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
+ struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ spin_lock(&cctx->lock);
+ for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
+ if (cctx->session[i].sid == sess->sid) {
+ cctx->session[i].valid = false;
+ cctx->sesscount--;
+ }
+ }
+ spin_unlock(&cctx->lock);
+
+ return 0;
+}
+
+static const struct of_device_id fastrpc_match_table[] = {
+ { .compatible = "qcom,fastrpc-compute-cb", },
+ {}
+};
+
+static struct platform_driver fastrpc_cb_driver = {
+ .probe = fastrpc_cb_probe,
+ .remove = fastrpc_cb_remove,
+ .driver = {
+ .name = "qcom,fastrpc-cb",
+ .of_match_table = fastrpc_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct device *rdev = &rpdev->dev;
+ struct fastrpc_channel_ctx *data;
+ int i, err, domain_id = -1;
+ const char *domain;
+
+ data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = of_property_read_string(rdev->of_node, "label", &domain);
+ if (err) {
+ dev_info(rdev, "FastRPC Domain not specified in DT\n");
+ return err;
+ }
+
+ for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
+ if (!strcmp(domains[i], domain)) {
+ domain_id = i;
+ break;
+ }
+ }
+
+ if (domain_id < 0) {
+ dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
+ return -EINVAL;
+ }
+
+ data->miscdev.minor = MISC_DYNAMIC_MINOR;
+ data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
+ domains[domain_id]);
+ data->miscdev.fops = &fastrpc_fops;
+ err = misc_register(&data->miscdev);
+ if (err)
+ return err;
+
+ dev_set_drvdata(&rpdev->dev, data);
+ dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
+ INIT_LIST_HEAD(&data->users);
+ spin_lock_init(&data->lock);
+ idr_init(&data->ctx_idr);
+ data->domain_id = domain_id;
+ data->rpdev = rpdev;
+
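+ /* Create platform devices for the compute context banks described under this node */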
+ return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
+}
+
+static void fastrpc_notify_users(struct fastrpc_user *user)
+{
+ struct fastrpc_invoke_ctx *ctx;
+
+ spin_lock(&user->lock);
+ list_for_each_entry(ctx, &user->pending, node)
+ complete(&ctx->work);
+ spin_unlock(&user->lock);
+}
+
+static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
+ struct fastrpc_user *user;
+
+ spin_lock(&cctx->lock);
+ list_for_each_entry(user, &cctx->users, user)
+ fastrpc_notify_users(user);
+ spin_unlock(&cctx->lock);
+
+ misc_deregister(&cctx->miscdev);
+ of_platform_depopulate(&rpdev->dev);
+ kfree(cctx->miscdev.name);
+ /* cctx itself was allocated with devm_kzalloc(), so the driver core frees it */
+}
+
+static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 addr)
+{
+ struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
+ struct fastrpc_invoke_rsp *rsp = data;
+ struct fastrpc_invoke_ctx *ctx;
+ unsigned long flags;
+ unsigned long ctxid;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
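+ /* Recover the idr id that was packed into the context field of the invoke message */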
+ ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
+
+ spin_lock_irqsave(&cctx->lock, flags);
+ ctx = idr_find(&cctx->ctx_idr, ctxid);
+ spin_unlock_irqrestore(&cctx->lock, flags);
+
+ if (!ctx) {
+ dev_err(&rpdev->dev, "No context ID matches response\n");
+ return -ENOENT;
+ }
+
+ ctx->retval = rsp->retval;
+ complete(&ctx->work);
+ fastrpc_context_put(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id fastrpc_rpmsg_of_match[] = {
+ { .compatible = "qcom,fastrpc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
+
+static struct rpmsg_driver fastrpc_driver = {
+ .probe = fastrpc_rpmsg_probe,
+ .remove = fastrpc_rpmsg_remove,
+ .callback = fastrpc_rpmsg_callback,
+ .drv = {
+ .name = "qcom,fastrpc",
+ .of_match_table = fastrpc_rpmsg_of_match,
+ },
+};
+
+static int fastrpc_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&fastrpc_cb_driver);
+ if (ret < 0) {
+ pr_err("fastrpc: failed to register cb driver\n");
+ return ret;
+ }
+
+ ret = register_rpmsg_driver(&fastrpc_driver);
+ if (ret < 0) {
+ pr_err("fastrpc: failed to register rpmsg driver\n");
+ platform_driver_unregister(&fastrpc_cb_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(fastrpc_init);
+
+static void fastrpc_exit(void)
+{
+ unregister_rpmsg_driver(&fastrpc_driver);
+ platform_driver_unregister(&fastrpc_cb_driver);
+}
+module_exit(fastrpc_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
new file mode 100644
index 000000000000..99db2b82ada6
--- /dev/null
+++ b/drivers/misc/habanalabs/Kconfig
@@ -0,0 +1,25 @@
+#
+# HabanaLabs AI accelerators driver
+#
+
+config HABANA_AI
+ tristate "HabanaAI accelerators (habanalabs)"
+ depends on PCI && HAS_IOMEM
+ select FRAME_VECTOR
+ select DMA_SHARED_BUFFER
+ select GENERIC_ALLOCATOR
+ select HWMON
+ help
+ Enables the PCIe card driver for Habana's AI Processors (AIP), which
+ are designed to accelerate Deep Learning inference and training
+ workloads.
+
+ The driver manages the PCIe devices and provides an IOCTL interface
+ for the user to submit workloads to the devices.
+
+ The user-space interface is described in
+ include/uapi/misc/habanalabs.h
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called habanalabs.
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
new file mode 100644
index 000000000000..c6592db59b25
--- /dev/null
+++ b/drivers/misc/habanalabs/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for HabanaLabs AI accelerators driver
+#
+
+obj-$(CONFIG_HABANA_AI) := habanalabs.o
+
+habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
+ command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
+ command_submission.o mmu.o
+
+habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
+
+include $(src)/goya/Makefile
+habanalabs-y += $(HL_GOYA_FILES)
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c
new file mode 100644
index 000000000000..f54e7971a762
--- /dev/null
+++ b/drivers/misc/habanalabs/asid.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+int hl_asid_init(struct hl_device *hdev)
+{
+ hdev->asid_bitmap = kcalloc(BITS_TO_LONGS(hdev->asic_prop.max_asid),
+ sizeof(*hdev->asid_bitmap), GFP_KERNEL);
+ if (!hdev->asid_bitmap)
+ return -ENOMEM;
+
+ mutex_init(&hdev->asid_mutex);
+
+ /* ASID 0 is reserved for KMD */
+ set_bit(0, hdev->asid_bitmap);
+
+ return 0;
+}
+
+void hl_asid_fini(struct hl_device *hdev)
+{
+ mutex_destroy(&hdev->asid_mutex);
+ kfree(hdev->asid_bitmap);
+}
+
+unsigned long hl_asid_alloc(struct hl_device *hdev)
+{
+ unsigned long found;
+
+ mutex_lock(&hdev->asid_mutex);
+
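+ /* ASID 0 is reserved for the driver, so a return value of 0 means no free ASID */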
+ found = find_first_zero_bit(hdev->asid_bitmap,
+ hdev->asic_prop.max_asid);
+ if (found == hdev->asic_prop.max_asid)
+ found = 0;
+ else
+ set_bit(found, hdev->asid_bitmap);
+
+ mutex_unlock(&hdev->asid_mutex);
+
+ return found;
+}
+
+void hl_asid_free(struct hl_device *hdev, unsigned long asid)
+{
+ if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid),
+ "Invalid ASID %lu", asid))
+ return;
+ clear_bit(asid, hdev->asid_bitmap);
+}
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
new file mode 100644
index 000000000000..85f75806a9a7
--- /dev/null
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
+{
+ hdev->asic_funcs->dma_free_coherent(hdev, cb->size,
+ (void *) (uintptr_t) cb->kernel_address,
+ cb->bus_address);
+ kfree(cb);
+}
+
+static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
+{
+ if (cb->is_pool) {
+ spin_lock(&hdev->cb_pool_lock);
+ list_add(&cb->pool_list, &hdev->cb_pool);
+ spin_unlock(&hdev->cb_pool_lock);
+ } else {
+ cb_fini(hdev, cb);
+ }
+}
+
+static void cb_release(struct kref *ref)
+{
+ struct hl_device *hdev;
+ struct hl_cb *cb;
+
+ cb = container_of(ref, struct hl_cb, refcount);
+ hdev = cb->hdev;
+
+ hl_debugfs_remove_cb(cb);
+
+ cb_do_release(hdev, cb);
+}
+
+static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
+ int ctx_id)
+{
+ struct hl_cb *cb;
+ void *p;
+
+ /*
+ * We use GFP_ATOMIC here because this function can be called from
+ * the latency-sensitive code path for command submission. Due to H/W
+ * limitations in some of the ASICs, the kernel must copy the user CB
+ * that is designated for an external queue and actually enqueue
+ * the kernel's copy. Hence, we must never sleep in this code section
+ * and must use GFP_ATOMIC for all memory allocations.
+ */
+ if (ctx_id == HL_KERNEL_ASID_ID)
+ cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
+ else
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+
+ if (!cb)
+ return NULL;
+
+ if (ctx_id == HL_KERNEL_ASID_ID)
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
+ &cb->bus_address, GFP_ATOMIC);
+ else
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
+ &cb->bus_address,
+ GFP_USER | __GFP_ZERO);
+ if (!p) {
+ dev_err(hdev->dev,
+ "failed to allocate %d of dma memory for CB\n",
+ cb_size);
+ kfree(cb);
+ return NULL;
+ }
+
+ cb->kernel_address = (u64) (uintptr_t) p;
+ cb->size = cb_size;
+
+ return cb;
+}
+
+int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ u32 cb_size, u64 *handle, int ctx_id)
+{
+ struct hl_cb *cb;
+ bool alloc_new_cb = true;
+ int rc;
+
+ /*
+ * Can't use generic function to check this because of special case
+ * where we create a CB as part of the reset process
+ */
+ if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
+ (ctx_id != HL_KERNEL_ASID_ID))) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is disabled or in reset. Can't create new CBs\n");
+ rc = -EBUSY;
+ goto out_err;
+ }
+
+ if (cb_size > HL_MAX_CB_SIZE) {
+ dev_err(hdev->dev,
+ "CB size %d must be less then %d\n",
+ cb_size, HL_MAX_CB_SIZE);
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ /* Minimum allocation must be PAGE SIZE */
+ if (cb_size < PAGE_SIZE)
+ cb_size = PAGE_SIZE;
+
+ if (ctx_id == HL_KERNEL_ASID_ID &&
+ cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+
+ spin_lock(&hdev->cb_pool_lock);
+ if (!list_empty(&hdev->cb_pool)) {
+ cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
+ pool_list);
+ list_del(&cb->pool_list);
+ spin_unlock(&hdev->cb_pool_lock);
+ alloc_new_cb = false;
+ } else {
+ spin_unlock(&hdev->cb_pool_lock);
+ dev_dbg(hdev->dev, "CB pool is empty\n");
+ }
+ }
+
+ if (alloc_new_cb) {
+ cb = hl_cb_alloc(hdev, cb_size, ctx_id);
+ if (!cb) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ cb->hdev = hdev;
+ cb->ctx_id = ctx_id;
+
+ spin_lock(&mgr->cb_lock);
+ rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
+ spin_unlock(&mgr->cb_lock);
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
+ goto release_cb;
+ }
+
+ cb->id = rc;
+
+ kref_init(&cb->refcount);
+ spin_lock_init(&cb->lock);
+
+ /*
+ * The idr value is 32-bit, so we can safely OR it with a mask that is
+ * above 32 bit. The shifted result is handed to the user and doubles
+ * as the mmap offset used by hl_cb_mmap().
+ */
+ *handle = cb->id | HL_MMAP_CB_MASK;
+ *handle <<= PAGE_SHIFT;
+
+ hl_debugfs_add_cb(cb);
+
+ return 0;
+
+release_cb:
+ cb_do_release(hdev, cb);
+out_err:
+ *handle = 0;
+
+ return rc;
+}
+
+int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
+{
+ struct hl_cb *cb;
+ u32 handle;
+ int rc = 0;
+
+ /*
+ * The handle given to the user for mmap was shifted left by
+ * PAGE_SHIFT, so shift it back to the raw value the idr allocated
+ */
+ cb_handle >>= PAGE_SHIFT;
+ handle = (u32) cb_handle;
+
+ spin_lock(&mgr->cb_lock);
+
+ cb = idr_find(&mgr->cb_handles, handle);
+ if (cb) {
+ idr_remove(&mgr->cb_handles, handle);
+ spin_unlock(&mgr->cb_lock);
+ kref_put(&cb->refcount, cb_release);
+ } else {
+ spin_unlock(&mgr->cb_lock);
+ dev_err(hdev->dev,
+ "CB destroy failed, no match to handle 0x%x\n", handle);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ union hl_cb_args *args = data;
+ struct hl_device *hdev = hpriv->hdev;
+ u64 handle;
+ int rc;
+
+ switch (args->in.op) {
+ case HL_CB_OP_CREATE:
+ rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
+ &handle, hpriv->ctx->asid);
+ memset(args, 0, sizeof(*args));
+ args->out.cb_handle = handle;
+ break;
+ case HL_CB_OP_DESTROY:
+ rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
+ args->in.cb_handle);
+ break;
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
+static void cb_vm_close(struct vm_area_struct *vma)
+{
+ struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
+ long new_mmap_size;
+
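+ /* The user may munmap the CB in pieces; release it only when the whole range is gone */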
+ new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
+
+ if (new_mmap_size > 0) {
+ cb->mmap_size = new_mmap_size;
+ return;
+ }
+
+ spin_lock(&cb->lock);
+ cb->mmap = false;
+ spin_unlock(&cb->lock);
+
+ hl_cb_put(cb);
+ vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct cb_vm_ops = {
+ .close = cb_vm_close
+};
+
+int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cb *cb;
+ phys_addr_t address;
+ u32 handle;
+ int rc;
+
+ handle = vma->vm_pgoff;
+
+ /* hl_cb_get() takes a reference here; it is dropped in cb_vm_close() or on the error paths below */
+ cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
+ if (!cb) {
+ dev_err(hdev->dev,
+ "CB mmap failed, no match to handle %d\n", handle);
+ return -EINVAL;
+ }
+
+ /* Validation check */
+ if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
+ dev_err(hdev->dev,
+ "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
+ vma->vm_end - vma->vm_start, cb->size);
+ rc = -EINVAL;
+ goto put_cb;
+ }
+
+ spin_lock(&cb->lock);
+
+ if (cb->mmap) {
+ dev_err(hdev->dev,
+ "CB mmap failed, CB already mmaped to user\n");
+ rc = -EINVAL;
+ goto release_lock;
+ }
+
+ cb->mmap = true;
+
+ spin_unlock(&cb->lock);
+
+ vma->vm_ops = &cb_vm_ops;
+
+ /*
+ * Note: We're transferring the cb reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = cb;
+
+ /* Calculate address for CB */
+ address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);
+
+ rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
+ address, cb->size);
+
+ if (rc) {
+ spin_lock(&cb->lock);
+ cb->mmap = false;
+ goto release_lock;
+ }
+
+ cb->mmap_size = cb->size;
+
+ return 0;
+
+release_lock:
+ spin_unlock(&cb->lock);
+put_cb:
+ hl_cb_put(cb);
+ return rc;
+}
+
+struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ u32 handle)
+{
+ struct hl_cb *cb;
+
+ spin_lock(&mgr->cb_lock);
+ cb = idr_find(&mgr->cb_handles, handle);
+
+ if (!cb) {
+ spin_unlock(&mgr->cb_lock);
+ dev_warn(hdev->dev,
+ "CB get failed, no match to handle %d\n", handle);
+ return NULL;
+ }
+
+ kref_get(&cb->refcount);
+
+ spin_unlock(&mgr->cb_lock);
+
+ return cb;
+}
+
+void hl_cb_put(struct hl_cb *cb)
+{
+ kref_put(&cb->refcount, cb_release);
+}
+
+void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
+{
+ spin_lock_init(&mgr->cb_lock);
+ idr_init(&mgr->cb_handles);
+}
+
+void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
+{
+ struct hl_cb *cb;
+ struct idr *idp;
+ u32 id;
+
+ idp = &mgr->cb_handles;
+
+ idr_for_each_entry(idp, cb, id) {
+ if (kref_put(&cb->refcount, cb_release) != 1)
+ dev_err(hdev->dev,
+ "CB %d for CTX ID %d is still alive\n",
+ id, cb->ctx_id);
+ }
+
+ idr_destroy(&mgr->cb_handles);
+}
+
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
+{
+ u64 cb_handle;
+ struct hl_cb *cb;
+ int rc;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
+ HL_KERNEL_ASID_ID);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc);
+ return NULL;
+ }
+
+ cb_handle >>= PAGE_SHIFT;
+ cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
+ if (!cb)
+ goto destroy_cb;
+
+ return cb;
+
+destroy_cb:
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);
+
+ return NULL;
+}
+
+int hl_cb_pool_init(struct hl_device *hdev)
+{
+ struct hl_cb *cb;
+ int i;
+
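+ /* Pre-allocate CBs for the driver so the submission path can reuse them without a DMA allocation */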
+ INIT_LIST_HEAD(&hdev->cb_pool);
+ spin_lock_init(&hdev->cb_pool_lock);
+
+ for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
+ cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
+ HL_KERNEL_ASID_ID);
+ if (cb) {
+ cb->is_pool = true;
+ list_add(&cb->pool_list, &hdev->cb_pool);
+ } else {
+ hl_cb_pool_fini(hdev);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int hl_cb_pool_fini(struct hl_device *hdev)
+{
+ struct hl_cb *cb, *tmp;
+
+ list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
+ list_del(&cb->pool_list);
+ cb_fini(hdev, cb);
+ }
+
+ return 0;
+}
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
new file mode 100644
index 000000000000..3525236ed8d9
--- /dev/null
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+static void job_wq_completion(struct work_struct *work);
+static long _hl_cs_wait_ioctl(struct hl_device *hdev,
+ struct hl_ctx *ctx, u64 timeout_us, u64 seq);
+static void cs_do_release(struct kref *ref);
+
+static const char *hl_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "HabanaLabs";
+}
+
+static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct hl_dma_fence *hl_fence =
+ container_of(fence, struct hl_dma_fence, base_fence);
+
+ return dev_name(hl_fence->hdev->dev);
+}
+
+static bool hl_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static void hl_fence_release(struct dma_fence *fence)
+{
+ struct hl_dma_fence *hl_fence =
+ container_of(fence, struct hl_dma_fence, base_fence);
+
+ kfree_rcu(hl_fence, base_fence.rcu);
+}
+
+static const struct dma_fence_ops hl_fence_ops = {
+ .get_driver_name = hl_fence_get_driver_name,
+ .get_timeline_name = hl_fence_get_timeline_name,
+ .enable_signaling = hl_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = hl_fence_release
+};
+
+static void cs_get(struct hl_cs *cs)
+{
+ kref_get(&cs->refcount);
+}
+
+static int cs_get_unless_zero(struct hl_cs *cs)
+{
+ return kref_get_unless_zero(&cs->refcount);
+}
+
+static void cs_put(struct hl_cs *cs)
+{
+ kref_put(&cs->refcount, cs_do_release);
+}
+
+/*
+ * cs_parser - parse the user command submission
+ *
+ * @hpriv : pointer to the private data of the fd
+ * @job : pointer to the job that holds the command submission info
+ *
+ * The function parses the command submission of the user. It calls the
+ * ASIC specific parser, which returns a list of memory blocks to send
+ * to the device as different command buffers
+ *
+ */
+static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cs_parser parser;
+ int rc;
+
+ parser.ctx_id = job->cs->ctx->asid;
+ parser.cs_sequence = job->cs->sequence;
+ parser.job_id = job->id;
+
+ parser.hw_queue_id = job->hw_queue_id;
+ parser.job_userptr_list = &job->userptr_list;
+ parser.patched_cb = NULL;
+ parser.user_cb = job->user_cb;
+ parser.user_cb_size = job->user_cb_size;
+ parser.ext_queue = job->ext_queue;
+ job->patched_cb = NULL;
+ parser.use_virt_addr = hdev->mmu_enable;
+
+ rc = hdev->asic_funcs->cs_parser(hdev, &parser);
+ if (job->ext_queue) {
+ if (!rc) {
+ job->patched_cb = parser.patched_cb;
+ job->job_cb_size = parser.patched_cb_size;
+
+ spin_lock(&job->patched_cb->lock);
+ job->patched_cb->cs_cnt++;
+ spin_unlock(&job->patched_cb->lock);
+ }
+
+ /*
+ * Whether the parsing worked or not, we don't need the
+ * original CB anymore because it was already parsed and
+ * won't be accessed again for this CS
+ */
+ spin_lock(&job->user_cb->lock);
+ job->user_cb->cs_cnt--;
+ spin_unlock(&job->user_cb->lock);
+ hl_cb_put(job->user_cb);
+ job->user_cb = NULL;
+ }
+
+ return rc;
+}
+
+static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_cs *cs = job->cs;
+
+ if (job->ext_queue) {
+ hl_userptr_delete_list(hdev, &job->userptr_list);
+
+ /*
+ * We might get here from a rollback before the patched CB was
+ * created, so check that it is not NULL
+ */
+ if (job->patched_cb) {
+ spin_lock(&job->patched_cb->lock);
+ job->patched_cb->cs_cnt--;
+ spin_unlock(&job->patched_cb->lock);
+
+ hl_cb_put(job->patched_cb);
+ }
+ }
+
+ /*
+ * This is the only place where there can be multiple threads
+ * modifying the list at the same time
+ */
+ spin_lock(&cs->job_lock);
+ list_del(&job->cs_node);
+ spin_unlock(&cs->job_lock);
+
+ hl_debugfs_remove_job(hdev, job);
+
+ if (job->ext_queue)
+ cs_put(cs);
+
+ kfree(job);
+}
+
+static void cs_do_release(struct kref *ref)
+{
+ struct hl_cs *cs = container_of(ref, struct hl_cs,
+ refcount);
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_cs_job *job, *tmp;
+
+ cs->completed = true;
+
+ /*
+ * Reaching here means all external jobs have finished, because each
+ * of them held a reference to the CS. We still need to go over the
+ * internal jobs and free them; otherwise we would leak memory and,
+ * worse, the CS object (and potentially the CTX object) could be
+ * released while a JOB still holds a pointer to them without a
+ * reference.
+ */
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+ free_job(hdev, job);
+
+ /* We also need to update CI for internal queues */
+ if (cs->submitted) {
+ hl_int_hw_queue_update_ci(cs);
+
+ spin_lock(&hdev->hw_queues_mirror_lock);
+ /* remove CS from hw_queues mirror list */
+ list_del_init(&cs->mirror_node);
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+
+ /*
+ * Don't cancel TDR in case this CS was timedout because we
+ * might be running from the TDR context
+ */
+ if ((!cs->timedout) &&
+ (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
+ struct hl_cs *next;
+
+ if (cs->tdr_active)
+ cancel_delayed_work_sync(&cs->work_tdr);
+
+ spin_lock(&hdev->hw_queues_mirror_lock);
+
+ /* queue TDR for next CS */
+ next = list_first_entry_or_null(
+ &hdev->hw_queues_mirror_list,
+ struct hl_cs, mirror_node);
+
+ if ((next) && (!next->tdr_active)) {
+ next->tdr_active = true;
+ schedule_delayed_work(&next->work_tdr,
+ hdev->timeout_jiffies);
+ }
+
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ }
+ }
+
+ /*
+ * Must be called before hl_ctx_put because inside we use ctx to get
+ * the device
+ */
+ hl_debugfs_remove_cs(cs);
+
+ hl_ctx_put(cs->ctx);
+
+ if (cs->timedout)
+ dma_fence_set_error(cs->fence, -ETIMEDOUT);
+ else if (cs->aborted)
+ dma_fence_set_error(cs->fence, -EIO);
+
+ dma_fence_signal(cs->fence);
+ dma_fence_put(cs->fence);
+
+ kfree(cs);
+}
+
+static void cs_timedout(struct work_struct *work)
+{
+ struct hl_device *hdev;
+ int ctx_asid, rc;
+ struct hl_cs *cs = container_of(work, struct hl_cs,
+ work_tdr.work);
+
+ rc = cs_get_unless_zero(cs);
+ if (!rc)
+ return;
+
+ if ((!cs->submitted) || (cs->completed)) {
+ cs_put(cs);
+ return;
+ }
+
+ /* Mark the CS as timed out so we won't try to cancel its TDR */
+ cs->timedout = true;
+
+ hdev = cs->ctx->hdev;
+ ctx_asid = cs->ctx->asid;
+
+ /* TODO: add information about last signaled seq and last emitted seq */
+ dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);
+
+ cs_put(cs);
+
+ if (hdev->reset_on_lockup)
+ hl_device_reset(hdev, false, false);
+}
+
+static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct hl_cs **cs_new)
+{
+ struct hl_dma_fence *fence;
+ struct dma_fence *other = NULL;
+ struct hl_cs *cs;
+ int rc;
+
+ cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
+ if (!cs)
+ return -ENOMEM;
+
+ cs->ctx = ctx;
+ cs->submitted = false;
+ cs->completed = false;
+ INIT_LIST_HEAD(&cs->job_list);
+ INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
+ kref_init(&cs->refcount);
+ spin_lock_init(&cs->job_lock);
+
+ fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
+ if (!fence) {
+ rc = -ENOMEM;
+ goto free_cs;
+ }
+
+ fence->hdev = hdev;
+ spin_lock_init(&fence->lock);
+ cs->fence = &fence->base_fence;
+
+ spin_lock(&ctx->cs_lock);
+
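+ /*
+ * cs_pending is a ring of HL_MAX_PENDING_CS fences indexed by sequence
+ * number. If the slot we are about to reuse still holds an unsignaled
+ * fence, too many CS are in flight, so back off with -EAGAIN.
+ */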
+ fence->cs_seq = ctx->cs_sequence;
+ other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
+ if ((other) && (!dma_fence_is_signaled(other))) {
+ spin_unlock(&ctx->cs_lock);
+ rc = -EAGAIN;
+ goto free_fence;
+ }
+
+ dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
+ ctx->asid, ctx->cs_sequence);
+
+ cs->sequence = fence->cs_seq;
+
+ ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+ &fence->base_fence;
+ ctx->cs_sequence++;
+
+ dma_fence_get(&fence->base_fence);
+
+ dma_fence_put(other);
+
+ spin_unlock(&ctx->cs_lock);
+
+ *cs_new = cs;
+
+ return 0;
+
+free_fence:
+ kfree(fence);
+free_cs:
+ kfree(cs);
+ return rc;
+}
+
+static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_cs_job *job, *tmp;
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+ free_job(hdev, job);
+}
+
+void hl_cs_rollback_all(struct hl_device *hdev)
+{
+ struct hl_cs *cs, *tmp;
+
+ /* flush all completions */
+ flush_workqueue(hdev->cq_wq);
+
+ /* Make sure we don't have leftovers in the H/W queues mirror list */
+ list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
+ mirror_node) {
+ cs_get(cs);
+ cs->aborted = true;
+ dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
+ cs->ctx->asid, cs->sequence);
+ cs_rollback(hdev, cs);
+ cs_put(cs);
+ }
+}
+
+static void job_wq_completion(struct work_struct *work)
+{
+ struct hl_cs_job *job = container_of(work, struct hl_cs_job,
+ finish_work);
+ struct hl_cs *cs = job->cs;
+ struct hl_device *hdev = cs->ctx->hdev;
+
+ /* job is no longer needed */
+ free_job(hdev, job);
+}
+
+static struct hl_cb *validate_queue_index(struct hl_device *hdev,
+ struct hl_cb_mgr *cb_mgr,
+ struct hl_cs_chunk *chunk,
+ bool *ext_queue)
+{
+ struct asic_fixed_properties *asic = &hdev->asic_prop;
+ struct hw_queue_properties *hw_queue_prop;
+ u32 cb_handle;
+ struct hl_cb *cb;
+
+ /* Assume external queue */
+ *ext_queue = true;
+
+ hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
+
+ if ((chunk->queue_index >= HL_MAX_QUEUES) ||
+ (hw_queue_prop->type == QUEUE_TYPE_NA)) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ chunk->queue_index);
+ return NULL;
+ }
+
+ if (hw_queue_prop->kmd_only) {
+ dev_err(hdev->dev, "Queue index %d is restricted for KMD\n",
+ chunk->queue_index);
+ return NULL;
+ } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
+ *ext_queue = false;
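+ /* Internal queues don't use CB handles; pass the value through for the ASIC-specific code */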
+ return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
+ }
+
+ /* Retrieve CB object */
+ cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
+
+ cb = hl_cb_get(hdev, cb_mgr, cb_handle);
+ if (!cb) {
+ dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
+ return NULL;
+ }
+
+ if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
+ dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
+ goto release_cb;
+ }
+
+ spin_lock(&cb->lock);
+ cb->cs_cnt++;
+ spin_unlock(&cb->lock);
+
+ return cb;
+
+release_cb:
+ hl_cb_put(cb);
+ return NULL;
+}
+
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
+{
+ struct hl_cs_job *job;
+
+ job = kzalloc(sizeof(*job), GFP_ATOMIC);
+ if (!job)
+ return NULL;
+
+ job->ext_queue = ext_queue;
+
+ if (job->ext_queue) {
+ INIT_LIST_HEAD(&job->userptr_list);
+ INIT_WORK(&job->finish_work, job_wq_completion);
+ }
+
+ return job;
+}
+
+static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
+ u32 num_chunks, u64 *cs_seq)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cs_chunk *cs_chunk_array;
+ struct hl_cs_job *job;
+ struct hl_cs *cs;
+ struct hl_cb *cb;
+ bool ext_queue_present = false;
+ u32 size_to_copy;
+ int rc, i, parse_cnt;
+
+ *cs_seq = ULLONG_MAX;
+
+ if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ dev_err(hdev->dev,
+ "Number of chunks can NOT be larger than %d\n",
+ HL_MAX_JOBS_PER_CS);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+ GFP_ATOMIC);
+ if (!cs_chunk_array) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
+ if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
+ dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
+ rc = -EFAULT;
+ goto free_cs_chunk_array;
+ }
+
+ /* increment refcnt for context */
+ hl_ctx_get(hdev, hpriv->ctx);
+
+ rc = allocate_cs(hdev, hpriv->ctx, &cs);
+ if (rc) {
+ hl_ctx_put(hpriv->ctx);
+ goto free_cs_chunk_array;
+ }
+
+ *cs_seq = cs->sequence;
+
+ hl_debugfs_add_cs(cs);
+
+ /* Validate ALL the CS chunks before submitting the CS */
+ for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
+ struct hl_cs_chunk *chunk = &cs_chunk_array[i];
+ bool ext_queue;
+
+ cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
+ &ext_queue);
+ if (ext_queue) {
+ ext_queue_present = true;
+ if (!cb) {
+ rc = -EINVAL;
+ goto free_cs_object;
+ }
+ }
+
+ job = hl_cs_allocate_job(hdev, ext_queue);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ if (ext_queue)
+ goto release_cb;
+ else
+ goto free_cs_object;
+ }
+
+ job->id = i + 1;
+ job->cs = cs;
+ job->user_cb = cb;
+ job->user_cb_size = chunk->cb_size;
+ if (job->ext_queue)
+ job->job_cb_size = cb->size;
+ else
+ job->job_cb_size = chunk->cb_size;
+ job->hw_queue_id = chunk->queue_index;
+
+ cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+ list_add_tail(&job->cs_node, &cs->job_list);
+
+ /*
+ * Increment the CS reference. When it drops to 0 the CS is
+ * done, can be signaled to the user and all its resources freed.
+ * Only increment for JOBs on external queues, because only those
+ * JOBs generate a completion.
+ */
+ if (job->ext_queue)
+ cs_get(cs);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = cs_parser(hpriv, job);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
+ cs->ctx->asid, cs->sequence, job->id, rc);
+ goto free_cs_object;
+ }
+ }
+
+ if (!ext_queue_present) {
+ dev_err(hdev->dev,
+ "Reject CS %d.%llu because no external queues jobs\n",
+ cs->ctx->asid, cs->sequence);
+ rc = -EINVAL;
+ goto free_cs_object;
+ }
+
+ rc = hl_hw_queue_schedule_cs(cs);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ cs->ctx->asid, cs->sequence, rc);
+ goto free_cs_object;
+ }
+
+ rc = HL_CS_STATUS_SUCCESS;
+ goto put_cs;
+
+release_cb:
+ spin_lock(&cb->lock);
+ cb->cs_cnt--;
+ spin_unlock(&cb->lock);
+ hl_cb_put(cb);
+free_cs_object:
+ cs_rollback(hdev, cs);
+ *cs_seq = ULLONG_MAX;
+ /* The path below is both for good and erroneous exits */
+put_cs:
+ /* We finished with the CS in this function, so put the ref */
+ cs_put(cs);
+free_cs_chunk_array:
+ kfree(cs_chunk_array);
+out:
+ return rc;
+}
+
+int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ union hl_cs_args *args = data;
+ struct hl_ctx *ctx = hpriv->ctx;
+ void __user *chunks;
+ u32 num_chunks;
+ u64 cs_seq = ULLONG_MAX;
+ int rc, do_restore;
+ bool need_soft_reset = false;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_warn(hdev->dev,
+ "Device is %s. Can't submit new CS\n",
+ atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
+
+ if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+ long ret;
+
+ chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
+ num_chunks = args->in.num_chunks_restore;
+
+ mutex_lock(&hpriv->restore_phase_mutex);
+
+ if (do_restore) {
+ rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "Failed to switch to context %d, rejecting CS! %d\n",
+ ctx->asid, rc);
+ /*
+ * If we timed out, or if the device is not IDLE
+ * while we want to do a context switch (-EBUSY),
+ * we need to soft-reset because the QMAN is
+ * probably stuck. However, we can't call reset
+ * directly from here because of a deadlock, so we
+ * do it at the very end of this function
+ */
+ if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
+ need_soft_reset = true;
+ mutex_unlock(&hpriv->restore_phase_mutex);
+ goto out;
+ }
+ }
+
+ hdev->asic_funcs->restore_phase_topology(hdev);
+
+ if (num_chunks == 0) {
+ dev_dbg(hdev->dev,
+ "Need to run restore phase but restore CS is empty\n");
+ rc = 0;
+ } else {
+ rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,
+ &cs_seq);
+ }
+
+ mutex_unlock(&hpriv->restore_phase_mutex);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to submit restore CS for context %d (%d)\n",
+ ctx->asid, rc);
+ goto out;
+ }
+
+ /* Need to wait for restore completion before execution phase */
+ if (num_chunks > 0) {
+ ret = _hl_cs_wait_ioctl(hdev, ctx,
+ jiffies_to_usecs(hdev->timeout_jiffies),
+ cs_seq);
+ if (ret <= 0) {
+ dev_err(hdev->dev,
+ "Restore CS for context %d failed to complete %ld\n",
+ ctx->asid, ret);
+ rc = -ENOEXEC;
+ goto out;
+ }
+ }
+
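+ /* Let other threads of this context, which poll the wait token below, continue to the execution phase */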
+ ctx->thread_restore_wait_token = 1;
+ } else if (!ctx->thread_restore_wait_token) {
+ u32 tmp;
+
+ rc = hl_poll_timeout_memory(hdev,
+ (u64) (uintptr_t) &ctx->thread_restore_wait_token,
+ jiffies_to_usecs(hdev->timeout_jiffies),
+ &tmp);
+
+ if (rc || !tmp) {
+ dev_err(hdev->dev,
+ "restore phase hasn't finished in time\n");
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+ }
+
+ chunks = (void __user *)(uintptr_t)args->in.chunks_execute;
+ num_chunks = args->in.num_chunks_execute;
+
+ if (num_chunks == 0) {
+ dev_err(hdev->dev,
+ "Got execute CS with 0 chunks, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);
+
+out:
+ if (rc != -EAGAIN) {
+ memset(args, 0, sizeof(*args));
+ args->out.status = rc;
+ args->out.seq = cs_seq;
+ }
+
+ if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
+ hl_device_reset(hdev, false, false);
+
+ return rc;
+}
+
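+/*
+ * _hl_cs_wait_ioctl - wait for a CS to complete
+ *
+ * Returns a positive value if the CS completed, 0 if it is still busy when
+ * the timeout expires and a negative error code on failure.
+ */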
+static long _hl_cs_wait_ioctl(struct hl_device *hdev,
+ struct hl_ctx *ctx, u64 timeout_us, u64 seq)
+{
+ struct dma_fence *fence;
+ unsigned long timeout;
+ long rc;
+
+ if (timeout_us == MAX_SCHEDULE_TIMEOUT)
+ timeout = timeout_us;
+ else
+ timeout = usecs_to_jiffies(timeout_us);
+
+ hl_ctx_get(hdev, ctx);
+
+ fence = hl_ctx_get_fence(ctx, seq);
+ if (IS_ERR(fence)) {
+ rc = PTR_ERR(fence);
+ } else if (fence) {
+ rc = dma_fence_wait_timeout(fence, true, timeout);
+ if (fence->error == -ETIMEDOUT)
+ rc = -ETIMEDOUT;
+ else if (fence->error == -EIO)
+ rc = -EIO;
+ dma_fence_put(fence);
+ } else {
+ rc = 1;
+ }
+
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ union hl_wait_cs_args *args = data;
+ u64 seq = args->in.seq;
+ long rc;
+
+ rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);
+
+ memset(args, 0, sizeof(*args));
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
+ rc, seq);
+ if (rc == -ERESTARTSYS) {
+ args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ rc = -EINTR;
+ } else if (rc == -ETIMEDOUT) {
+ args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
+ } else if (rc == -EIO) {
+ args->out.status = HL_WAIT_CS_STATUS_ABORTED;
+ }
+ return rc;
+ }
+
+ if (rc == 0)
+ args->out.status = HL_WAIT_CS_STATUS_BUSY;
+ else
+ args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+
+ return 0;
+}
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
new file mode 100644
index 000000000000..619ace1c4ef7
--- /dev/null
+++ b/drivers/misc/habanalabs/context.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+static void hl_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ int i;
+
+ /*
+ * If we got here, no jobs are waiting for this context on its queues,
+ * so it can be removed safely. Each CS increments the context's ref
+ * count and each finished CS decrements it, and we only reach this
+ * function once the ref count drops to 0.
+ */
+
+ for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
+ dma_fence_put(ctx->cs_pending[i]);
+
+ if (ctx->asid != HL_KERNEL_ASID_ID) {
+ hl_vm_ctx_fini(ctx);
+ hl_asid_free(hdev, ctx->asid);
+ }
+}
+
+void hl_ctx_do_release(struct kref *ref)
+{
+ struct hl_ctx *ctx;
+
+ ctx = container_of(ref, struct hl_ctx, refcount);
+
+ hl_ctx_fini(ctx);
+
+ if (ctx->hpriv)
+ hl_hpriv_put(ctx->hpriv);
+
+ kfree(ctx);
+}
+
+int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
+{
+ struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
+ struct hl_ctx *ctx;
+ int rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
+ rc = hl_ctx_init(hdev, ctx, false);
+ if (rc)
+ goto free_ctx;
+
+ hl_hpriv_get(hpriv);
+ ctx->hpriv = hpriv;
+
+ /* TODO: remove for multiple contexts */
+ hpriv->ctx = ctx;
+ hdev->user_ctx = ctx;
+
+ mutex_lock(&mgr->ctx_lock);
+ rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ mutex_unlock(&mgr->ctx_lock);
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
+ hl_ctx_free(hdev, ctx);
+ goto out_err;
+ }
+
+ return 0;
+
+free_ctx:
+ kfree(ctx);
+out_err:
+ return rc;
+}
+
+void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
+ return;
+
+ dev_warn(hdev->dev,
+ "Context %d closed or terminated but its CS are executing\n",
+ ctx->asid);
+}
+
+int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
+{
+ int rc = 0;
+
+ ctx->hdev = hdev;
+
+ kref_init(&ctx->refcount);
+
+ ctx->cs_sequence = 1;
+ spin_lock_init(&ctx->cs_lock);
+ atomic_set(&ctx->thread_restore_token, 1);
+ ctx->thread_restore_wait_token = 0;
+
+ if (is_kernel_ctx) {
+ ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
+ } else {
+ ctx->asid = hl_asid_alloc(hdev);
+ if (!ctx->asid) {
+ dev_err(hdev->dev, "No free ASID, failed to create context\n");
+ return -ENOMEM;
+ }
+
+ rc = hl_vm_ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to init mem ctx module\n");
+ rc = -ENOMEM;
+ goto mem_ctx_err;
+ }
+ }
+
+ return 0;
+
+mem_ctx_err:
+ if (ctx->asid != HL_KERNEL_ASID_ID)
+ hl_asid_free(hdev, ctx->asid);
+
+ return rc;
+}
+
+void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ kref_get(&ctx->refcount);
+}
+
+int hl_ctx_put(struct hl_ctx *ctx)
+{
+ return kref_put(&ctx->refcount, hl_ctx_do_release);
+}
+
+struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct dma_fence *fence;
+
+ spin_lock(&ctx->cs_lock);
+
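+ /*
+ * The fence of a given sequence is kept in the cs_pending ring only
+ * while it is among the last HL_MAX_PENDING_CS submissions.
+ */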
+ if (seq >= ctx->cs_sequence) {
+ dev_notice(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
+ spin_unlock(&ctx->cs_lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
+ spin_unlock(&ctx->cs_lock);
+ return NULL;
+ }
+
+ fence = dma_fence_get(
+ ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
+ spin_unlock(&ctx->cs_lock);
+
+ return fence;
+}
+
+/*
+ * hl_ctx_mgr_init - initialize the context manager
+ *
+ * @mgr: pointer to context manager structure
+ *
+ * This manager is an object inside the hpriv object of the user process.
+ * The function is called when a user process opens the FD.
+ */
+void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
+{
+ mutex_init(&mgr->ctx_lock);
+ idr_init(&mgr->ctx_handles);
+}
+
+/*
+ * hl_ctx_mgr_fini - finalize the context manager
+ *
+ * @hdev: pointer to device structure
+ * @mgr: pointer to context manager structure
+ *
+ * This function goes over all the contexts in the manager and frees them.
+ * It is called when a process closes the FD.
+ */
+void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
+{
+ struct hl_ctx *ctx;
+ struct idr *idp;
+ u32 id;
+
+ idp = &mgr->ctx_handles;
+
+ idr_for_each_entry(idp, ctx, id)
+ hl_ctx_free(hdev, ctx);
+
+ idr_destroy(&mgr->ctx_handles);
+ mutex_destroy(&mgr->ctx_lock);
+}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
new file mode 100644
index 000000000000..a53c12aff6ad
--- /dev/null
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -0,0 +1,1077 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#define MMU_ADDR_BUF_SIZE 40
+#define MMU_ASID_BUF_SIZE 10
+#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
+
+static struct dentry *hl_debug_root;
+
+static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
+ u8 i2c_reg, u32 *val)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -EBUSY;
+
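+ /* Build an ArmCP packet and send it to the device CPU to perform the I2C read */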
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_RD <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.i2c_bus = i2c_bus;
+ pkt.i2c_addr = i2c_addr;
+ pkt.i2c_reg = i2c_reg;
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, (long *) val);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
+
+ return rc;
+}
+
+static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
+ u8 i2c_reg, u32 val)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -EBUSY;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_WR <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.i2c_bus = i2c_bus;
+ pkt.i2c_addr = i2c_addr;
+ pkt.i2c_reg = i2c_reg;
+ pkt.value = __cpu_to_le64(val);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
+
+ return rc;
+}
+
+static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_LED_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.led_index = __cpu_to_le32(led);
+ pkt.value = __cpu_to_le64(state);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
+}
+
+static int command_buffers_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cb *cb;
+ bool first = true;
+
+ spin_lock(&dev_entry->cb_spinlock);
+
+ list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n");
+ seq_puts(s, "---------------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " %03d %d 0x%08x %d %d %d\n",
+ cb->id, cb->ctx_id, cb->size,
+ kref_read(&cb->refcount),
+ cb->mmap, cb->cs_cnt);
+ }
+
+ spin_unlock(&dev_entry->cb_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int command_submission_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cs *cs;
+ bool first = true;
+
+ spin_lock(&dev_entry->cs_spinlock);
+
+ list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
+ seq_puts(s, "------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " %llu %d %d %d %d\n",
+ cs->sequence, cs->ctx->asid,
+ kref_read(&cs->refcount),
+ cs->submitted, cs->completed);
+ }
+
+ spin_unlock(&dev_entry->cs_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int command_submission_jobs_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cs_job *job;
+ bool first = true;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+
+ list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n");
+ seq_puts(s, "---------------------------------------\n");
+ }
+ if (job->cs)
+ seq_printf(s,
+ " %02d %llu %d %d\n",
+ job->id, job->cs->sequence, job->cs->ctx->asid,
+ job->hw_queue_id);
+ else
+ seq_printf(s,
+ " %02d 0 %d %d\n",
+ job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
+ }
+
+ spin_unlock(&dev_entry->cs_job_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int userptr_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_userptr *userptr;
+ char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
+ "DMA_FROM_DEVICE", "DMA_NONE"};
+ bool first = true;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+
+ list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " user virtual address size dma dir\n");
+ seq_puts(s, "----------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " 0x%-14llx %-10u %-30s\n",
+ userptr->addr, userptr->size, dma_dir[userptr->dir]);
+ }
+
+ spin_unlock(&dev_entry->userptr_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int vm_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_ctx *ctx;
+ struct hl_vm *vm;
+ struct hl_vm_hash_node *hnode;
+ struct hl_userptr *userptr;
+ struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ enum vm_type_t *vm_type;
+ bool once = true;
+ int i, j;
+
+ if (!dev_entry->hdev->mmu_enable)
+ return 0;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+
+ list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
+ once = false;
+ seq_puts(s, "\n\n----------------------------------------------------");
+ seq_puts(s, "\n----------------------------------------------------\n\n");
+ seq_printf(s, "ctx asid: %u\n", ctx->asid);
+
+ seq_puts(s, "\nmappings:\n\n");
+ seq_puts(s, " virtual address size handle\n");
+ seq_puts(s, "----------------------------------------------------\n");
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_for_each(ctx->mem_hash, i, hnode, node) {
+ vm_type = hnode->ptr;
+
+ if (*vm_type == VM_TYPE_USERPTR) {
+ userptr = hnode->ptr;
+ seq_printf(s,
+ " 0x%-14llx %-10u\n",
+ hnode->vaddr, userptr->size);
+ } else {
+ phys_pg_pack = hnode->ptr;
+ seq_printf(s,
+ " 0x%-14llx %-10u %-4u\n",
+ hnode->vaddr, phys_pg_pack->total_size,
+ phys_pg_pack->handle);
+ }
+ }
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ vm = &ctx->hdev->vm;
+ spin_lock(&vm->idr_lock);
+
+ if (!idr_is_empty(&vm->phys_pg_pack_handles))
+ seq_puts(s, "\n\nallocations:\n");
+
+ idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
+ if (phys_pg_pack->asid != ctx->asid)
+ continue;
+
+ seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
+ seq_printf(s, "page size: %u\n\n",
+ phys_pg_pack->page_size);
+ seq_puts(s, " physical address\n");
+ seq_puts(s, "---------------------\n");
+ /* use a separate index so the idr iteration cursor isn't clobbered */
+ for (j = 0 ; j < phys_pg_pack->npages ; j++) {
+ seq_printf(s, " 0x%-14llx\n",
+ phys_pg_pack->pages[j]);
+ }
+ }
+ spin_unlock(&vm->idr_lock);
+
+ }
+
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+
+ if (!once)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+/* these inline functions are copied from mmu.c */
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP0_MASK) >> HOP0_SHIFT);
+}
+
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP1_MASK) >> HOP1_SHIFT);
+}
+
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP2_MASK) >> HOP2_SHIFT);
+}
+
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP3_MASK) >> HOP3_SHIFT);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP4_MASK) >> HOP4_SHIFT);
+}
+
+static inline u64 get_next_hop_addr(u64 curr_pte)
+{
+ if (curr_pte & PAGE_PRESENT_MASK)
+ return curr_pte & PHYS_ADDR_MASK;
+ else
+ return ULLONG_MAX;
+}
+
+static int mmu_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->user_ctx;
+
+ u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
+ hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
+ hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
+ hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
+ hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
+ virt_addr = dev_entry->mmu_addr;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (!ctx) {
+ dev_err(hdev->dev, "no ctx available\n");
+ return 0;
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+
+ /* the following lookup is copied from unmap() in mmu.c */
+
+ hop0_addr = get_hop0_addr(ctx);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+ hop1_addr = get_next_hop_addr(hop0_pte);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+ hop2_addr = get_next_hop_addr(hop1_pte);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+ hop3_addr = get_next_hop_addr(hop2_pte);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+ if (!(hop3_pte & LAST_MASK)) {
+ hop4_addr = get_next_hop_addr(hop3_pte);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+ if (!(hop4_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ } else {
+ if (!(hop3_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ }
+
+ seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
+ dev_entry->mmu_asid, dev_entry->mmu_addr);
+
+ seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
+ seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
+ seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);
+
+ seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
+ seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
+ seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);
+
+ seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
+ seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
+ seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);
+
+ seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
+ seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
+ seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);
+
+ if (!(hop3_pte & LAST_MASK)) {
+ seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
+ seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
+ seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
+ }
+
+ goto out;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+out:
+ mutex_unlock(&ctx->mmu_lock);
+
+ return 0;
+}
+
+static ssize_t mmu_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE],
+ addr_kbuf[MMU_ADDR_BUF_SIZE];
+ char *c;
+ ssize_t rc;
+
+ if (!hdev->mmu_enable)
+ return count;
+
+ memset(kbuf, 0, sizeof(kbuf));
+ memset(asid_kbuf, 0, sizeof(asid_kbuf));
+ memset(addr_kbuf, 0, sizeof(addr_kbuf));
+
+ /* bound the copy so a large write can't overflow the stack buffer */
+ if (count > sizeof(kbuf) - 1)
+ goto err;
+
+ if (copy_from_user(kbuf, buf, count))
+ goto err;
+
+ kbuf[MMU_KBUF_SIZE - 1] = 0;
+
+ c = strchr(kbuf, ' ');
+ if (!c)
+ goto err;
+
+ memcpy(asid_kbuf, kbuf, c - kbuf);
+
+ rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid);
+ if (rc)
+ goto err;
+
+ c = strstr(kbuf, " 0x");
+ if (!c)
+ goto err;
+
+ c += 3;
+ memcpy(addr_kbuf, c, (kbuf + count) - c);
+
+ rc = kstrtoull(addr_kbuf, 16, &dev_entry->mmu_addr);
+ if (rc)
+ goto err;
+
+ return count;
+
+err:
+ dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");
+
+ return -EINVAL;
+}
+
+static ssize_t hl_data_read32(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u32 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read from 0x%010llx\n",
+ entry->addr);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%08x\n", val);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_data_write32(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
+ value, entry->addr);
+ return rc;
+ }
+
+ return count;
+}
+
+static ssize_t hl_get_power_state(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+ int i;
+
+ if (*ppos)
+ return 0;
+
+ if (hdev->pdev->current_state == PCI_D0)
+ i = 1;
+ else if (hdev->pdev->current_state == PCI_D3hot)
+ i = 2;
+ else
+ i = 3;
+
+ sprintf(tmp_buf,
+ "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ if (value == 1) {
+ pci_set_power_state(hdev->pdev, PCI_D0);
+ pci_restore_state(hdev->pdev);
+ rc = pci_enable_device(hdev->pdev);
+ } else if (value == 2) {
+ pci_save_state(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+ pci_set_power_state(hdev->pdev, PCI_D3hot);
+ } else {
+ dev_dbg(hdev->dev, "invalid power state value %u\n", value);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u32 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
+ entry->i2c_reg, &val);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to read from I2C bus %d, addr %d, reg %d\n",
+ entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%02x\n", val);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
+ entry->i2c_reg, value);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
+ value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ return rc;
+ }
+
+ return count;
+}
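+
+/*
+ * Illustrative sketch, not part of the driver logic: an I2C access through
+ * the debugfs nodes implemented above. i2c_bus/i2c_addr/i2c_reg select the
+ * target and i2c_data performs the transaction; the path and the values are
+ * hypothetical:
+ *
+ *   echo 0 > /sys/kernel/debug/habanalabs/hl0/i2c_bus
+ *   echo 0x50 > /sys/kernel/debug/habanalabs/hl0/i2c_addr
+ *   echo 0x10 > /sys/kernel/debug/habanalabs/hl0/i2c_reg
+ *   cat /sys/kernel/debug/habanalabs/hl0/i2c_data
+ *   echo 0xab > /sys/kernel/debug/habanalabs/hl0/i2c_data
+ */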
+
+static ssize_t hl_led0_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 0, value);
+
+ return count;
+}
+
+static ssize_t hl_led1_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 1, value);
+
+ return count;
+}
+
+static ssize_t hl_led2_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 2, value);
+
+ return count;
+}
+
+static ssize_t hl_device_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf,
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_device_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char data[30] = {0};
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ simple_write_to_buffer(data, 29, ppos, buf, count);
+
+ if (strncmp("disable", data, strlen("disable")) == 0) {
+ hdev->disabled = true;
+ } else if (strncmp("enable", data, strlen("enable")) == 0) {
+ hdev->disabled = false;
+ } else if (strncmp("suspend", data, strlen("suspend")) == 0) {
+ hdev->asic_funcs->suspend(hdev);
+ } else if (strncmp("resume", data, strlen("resume")) == 0) {
+ hdev->asic_funcs->resume(hdev);
+ } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
+ hdev->device_cpu_disabled = true;
+ } else {
+ dev_err(hdev->dev,
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
+ count = -EINVAL;
+ }
+
+ return count;
+}
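+
+/*
+ * Illustrative sketch, not part of the driver logic: the "device" control
+ * node above accepts one of the keywords listed in hl_device_read(), e.g.
+ * (path is hypothetical):
+ *
+ *   echo suspend > /sys/kernel/debug/habanalabs/hl0/device
+ *   echo resume > /sys/kernel/debug/habanalabs/hl0/device
+ */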
+
+static const struct file_operations hl_data32b_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_data_read32,
+ .write = hl_data_write32
+};
+
+static const struct file_operations hl_i2c_data_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_i2c_data_read,
+ .write = hl_i2c_data_write
+};
+
+static const struct file_operations hl_power_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_get_power_state,
+ .write = hl_set_power_state
+};
+
+static const struct file_operations hl_led0_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led0_write
+};
+
+static const struct file_operations hl_led1_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led1_write
+};
+
+static const struct file_operations hl_led2_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led2_write
+};
+
+static const struct file_operations hl_device_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_device_read,
+ .write = hl_device_write
+};
+
+static const struct hl_info_list hl_debugfs_list[] = {
+ {"command_buffers", command_buffers_show, NULL},
+ {"command_submission", command_submission_show, NULL},
+ {"command_submission_jobs", command_submission_jobs_show, NULL},
+ {"userptr", userptr_show, NULL},
+ {"vm", vm_show, NULL},
+ {"mmu", mmu_show, mmu_write},
+};
+
+static int hl_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct hl_debugfs_entry *node = inode->i_private;
+
+ return single_open(file, node->info_ent->show, node);
+}
+
+static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct hl_debugfs_entry *node = file->f_inode->i_private;
+
+ if (node->info_ent->write)
+ return node->info_ent->write(file, buf, count, f_pos);
+ else
+ return -EINVAL;
+
+}
+
+static const struct file_operations hl_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = hl_debugfs_open,
+ .read = seq_read,
+ .write = hl_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hl_debugfs_add_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+ int count = ARRAY_SIZE(hl_debugfs_list);
+ struct hl_debugfs_entry *entry;
+ struct dentry *ent;
+ int i;
+
+ dev_entry->hdev = hdev;
+ dev_entry->entry_arr = kmalloc_array(count,
+ sizeof(struct hl_debugfs_entry),
+ GFP_KERNEL);
+ if (!dev_entry->entry_arr)
+ return;
+
+ INIT_LIST_HEAD(&dev_entry->file_list);
+ INIT_LIST_HEAD(&dev_entry->cb_list);
+ INIT_LIST_HEAD(&dev_entry->cs_list);
+ INIT_LIST_HEAD(&dev_entry->cs_job_list);
+ INIT_LIST_HEAD(&dev_entry->userptr_list);
+ INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
+ mutex_init(&dev_entry->file_mutex);
+ spin_lock_init(&dev_entry->cb_spinlock);
+ spin_lock_init(&dev_entry->cs_spinlock);
+ spin_lock_init(&dev_entry->cs_job_spinlock);
+ spin_lock_init(&dev_entry->userptr_spinlock);
+ spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
+
+ dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
+ hl_debug_root);
+
+ debugfs_create_x64("addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->addr);
+
+ debugfs_create_file("data32",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_data32b_fops);
+
+ debugfs_create_file("set_power_state",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_power_fops);
+
+ debugfs_create_u8("i2c_bus",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_bus);
+
+ debugfs_create_u8("i2c_addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_addr);
+
+ debugfs_create_u8("i2c_reg",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_reg);
+
+ debugfs_create_file("i2c_data",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_i2c_data_fops);
+
+ debugfs_create_file("led0",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led0_fops);
+
+ debugfs_create_file("led1",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led1_fops);
+
+ debugfs_create_file("led2",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led2_fops);
+
+ debugfs_create_file("device",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_device_fops);
+
+ for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+
+ ent = debugfs_create_file(hl_debugfs_list[i].name,
+ 0444,
+ dev_entry->root,
+ entry,
+ &hl_debugfs_fops);
+ entry->dent = ent;
+ entry->info_ent = &hl_debugfs_list[i];
+ entry->dev_entry = dev_entry;
+ }
+}
+
+void hl_debugfs_remove_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
+
+ debugfs_remove_recursive(entry->root);
+
+ mutex_destroy(&entry->file_mutex);
+ kfree(entry->entry_arr);
+}
+
+void hl_debugfs_add_file(struct hl_fpriv *hpriv)
+{
+ struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
+
+ mutex_lock(&dev_entry->file_mutex);
+ list_add(&hpriv->debugfs_list, &dev_entry->file_list);
+ mutex_unlock(&dev_entry->file_mutex);
+}
+
+void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
+{
+ struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
+
+ mutex_lock(&dev_entry->file_mutex);
+ list_del(&hpriv->debugfs_list);
+ mutex_unlock(&dev_entry->file_mutex);
+}
+
+void hl_debugfs_add_cb(struct hl_cb *cb)
+{
+ struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cb_spinlock);
+ list_add(&cb->debugfs_list, &dev_entry->cb_list);
+ spin_unlock(&dev_entry->cb_spinlock);
+}
+
+void hl_debugfs_remove_cb(struct hl_cb *cb)
+{
+ struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cb_spinlock);
+ list_del(&cb->debugfs_list);
+ spin_unlock(&dev_entry->cb_spinlock);
+}
+
+void hl_debugfs_add_cs(struct hl_cs *cs)
+{
+ struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_spinlock);
+ list_add(&cs->debugfs_list, &dev_entry->cs_list);
+ spin_unlock(&dev_entry->cs_spinlock);
+}
+
+void hl_debugfs_remove_cs(struct hl_cs *cs)
+{
+ struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_spinlock);
+ list_del(&cs->debugfs_list);
+ spin_unlock(&dev_entry->cs_spinlock);
+}
+
+void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+ list_add(&job->debugfs_list, &dev_entry->cs_job_list);
+ spin_unlock(&dev_entry->cs_job_spinlock);
+}
+
+void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+ list_del(&job->debugfs_list);
+ spin_unlock(&dev_entry->cs_job_spinlock);
+}
+
+void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+ list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
+ spin_unlock(&dev_entry->userptr_spinlock);
+}
+
+void hl_debugfs_remove_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+ list_del(&userptr->debugfs_list);
+ spin_unlock(&dev_entry->userptr_spinlock);
+}
+
+void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+}
+
+void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ list_del(&ctx->debugfs_list);
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+}
+
+void __init hl_debugfs_init(void)
+{
+ hl_debug_root = debugfs_create_dir("habanalabs", NULL);
+}
+
+void hl_debugfs_fini(void)
+{
+ debugfs_remove_recursive(hl_debug_root);
+}
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
new file mode 100644
index 000000000000..de46aa6ed154
--- /dev/null
+++ b/drivers/misc/habanalabs/device.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/hwmon.h>
+
+bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
+{
+ if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
+ return true;
+ else
+ return false;
+}
+
+static void hpriv_release(struct kref *ref)
+{
+ struct hl_fpriv *hpriv;
+ struct hl_device *hdev;
+
+ hpriv = container_of(ref, struct hl_fpriv, refcount);
+
+ hdev = hpriv->hdev;
+
+ put_pid(hpriv->taskpid);
+
+ hl_debugfs_remove_file(hpriv);
+
+ mutex_destroy(&hpriv->restore_phase_mutex);
+
+ kfree(hpriv);
+
+ /* Now the FD is really closed */
+ atomic_dec(&hdev->fd_open_cnt);
+
+ /* This allows a new user context to open the device */
+ hdev->user_ctx = NULL;
+}
+
+void hl_hpriv_get(struct hl_fpriv *hpriv)
+{
+ kref_get(&hpriv->refcount);
+}
+
+void hl_hpriv_put(struct hl_fpriv *hpriv)
+{
+ kref_put(&hpriv->refcount, hpriv_release);
+}
+
+/*
+ * hl_device_release - release function for habanalabs device
+ *
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ *
+ * Called when a process closes a habanalabs device
+ */
+static int hl_device_release(struct inode *inode, struct file *filp)
+{
+ struct hl_fpriv *hpriv = filp->private_data;
+
+ hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+ hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+
+ filp->private_data = NULL;
+
+ hl_hpriv_put(hpriv);
+
+ return 0;
+}
+
+/*
+ * hl_mmap - mmap function for habanalabs device
+ *
+ * @*filp: pointer to file structure
+ * @*vma: pointer to vm_area_struct of the process
+ *
+ * Called when a process does an mmap on the habanalabs device. Call the
+ * device's mmap function at the end of the common code.
+ */
+static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct hl_fpriv *hpriv = filp->private_data;
+
+ if ((vma->vm_pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK) {
+ vma->vm_pgoff ^= HL_MMAP_CB_MASK;
+ return hl_cb_mmap(hpriv, vma);
+ }
+
+ return -EINVAL;
+}
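+
+/*
+ * Illustrative userspace sketch, not part of the driver: hl_mmap() above
+ * expects the mmap offset to carry HL_MMAP_CB_MASK in its page offset in
+ * order to select a command-buffer mapping. Assuming a command-buffer
+ * handle returned by the driver's CB ioctl that already encodes those mask
+ * bits, and a hypothetical cb_size:
+ *
+ *   void *cb = mmap(NULL, cb_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                   fd, (off_t) cb_handle);
+ */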
+
+static const struct file_operations hl_ops = {
+ .owner = THIS_MODULE,
+ .open = hl_device_open,
+ .release = hl_device_release,
+ .mmap = hl_mmap,
+ .unlocked_ioctl = hl_ioctl,
+ .compat_ioctl = hl_ioctl
+};
+
+/*
+ * device_setup_cdev - setup cdev and device for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @hclass: pointer to the class object of the device
+ * @minor: minor number of the specific device
+ * @fops: file operations to install for this device
+ *
+ * Create a cdev and a Linux device for the habanalabs device. Needs to be
+ * called at the end of the habanalabs device initialization process,
+ * because this function exposes the device to the user
+ */
+static int device_setup_cdev(struct hl_device *hdev, struct class *hclass,
+ int minor, const struct file_operations *fops)
+{
+ int err, devno = MKDEV(hdev->major, minor);
+ struct cdev *hdev_cdev = &hdev->cdev;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "hl%d", hdev->id);
+ if (!name)
+ return -ENOMEM;
+
+ cdev_init(hdev_cdev, fops);
+ hdev_cdev->owner = THIS_MODULE;
+ err = cdev_add(hdev_cdev, devno, 1);
+ if (err) {
+ pr_err("Failed to add char device %s\n", name);
+ goto err_cdev_add;
+ }
+
+ hdev->dev = device_create(hclass, NULL, devno, NULL, "%s", name);
+ if (IS_ERR(hdev->dev)) {
+ pr_err("Failed to create device %s\n", name);
+ err = PTR_ERR(hdev->dev);
+ goto err_device_create;
+ }
+
+ dev_set_drvdata(hdev->dev, hdev);
+
+ kfree(name);
+
+ return 0;
+
+err_device_create:
+ cdev_del(hdev_cdev);
+err_cdev_add:
+ kfree(name);
+ return err;
+}
+
+/*
+ * device_early_init - do some early initialization for the habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Install the relevant function pointers and call the early_init function,
+ * if such a function exists
+ */
+static int device_early_init(struct hl_device *hdev)
+{
+ int rc;
+
+ switch (hdev->asic_type) {
+ case ASIC_GOYA:
+ goya_set_asic_funcs(hdev);
+ strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
+ break;
+ default:
+ dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
+ hdev->asic_type);
+ return -EINVAL;
+ }
+
+ rc = hdev->asic_funcs->early_init(hdev);
+ if (rc)
+ return rc;
+
+ rc = hl_asid_init(hdev);
+ if (rc)
+ goto early_fini;
+
+ hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
+ if (hdev->cq_wq == NULL) {
+ dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+ rc = -ENOMEM;
+ goto asid_fini;
+ }
+
+ hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
+ if (hdev->eq_wq == NULL) {
+ dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
+ rc = -ENOMEM;
+ goto free_cq_wq;
+ }
+
+ hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
+ GFP_KERNEL);
+ if (!hdev->hl_chip_info) {
+ rc = -ENOMEM;
+ goto free_eq_wq;
+ }
+
+ hl_cb_mgr_init(&hdev->kernel_cb_mgr);
+
+ mutex_init(&hdev->fd_open_cnt_lock);
+ mutex_init(&hdev->send_cpu_message_lock);
+ INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
+ spin_lock_init(&hdev->hw_queues_mirror_lock);
+ atomic_set(&hdev->in_reset, 0);
+ atomic_set(&hdev->fd_open_cnt, 0);
+
+ return 0;
+
+free_eq_wq:
+ destroy_workqueue(hdev->eq_wq);
+free_cq_wq:
+ destroy_workqueue(hdev->cq_wq);
+asid_fini:
+ hl_asid_fini(hdev);
+early_fini:
+ if (hdev->asic_funcs->early_fini)
+ hdev->asic_funcs->early_fini(hdev);
+
+ return rc;
+}
+
+/*
+ * device_early_fini - finalize all that was done in device_early_init
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ */
+static void device_early_fini(struct hl_device *hdev)
+{
+ mutex_destroy(&hdev->send_cpu_message_lock);
+
+ hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+
+ kfree(hdev->hl_chip_info);
+
+ destroy_workqueue(hdev->eq_wq);
+ destroy_workqueue(hdev->cq_wq);
+
+ hl_asid_fini(hdev);
+
+ if (hdev->asic_funcs->early_fini)
+ hdev->asic_funcs->early_fini(hdev);
+
+ mutex_destroy(&hdev->fd_open_cnt_lock);
+}
+
+static void set_freq_to_low_job(struct work_struct *work)
+{
+ struct hl_device *hdev = container_of(work, struct hl_device,
+ work_freq.work);
+
+ if (atomic_read(&hdev->fd_open_cnt) == 0)
+ hl_device_set_frequency(hdev, PLL_LOW);
+
+ schedule_delayed_work(&hdev->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+}
+
+static void hl_device_heartbeat(struct work_struct *work)
+{
+ struct hl_device *hdev = container_of(work, struct hl_device,
+ work_heartbeat.work);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ goto reschedule;
+
+ if (!hdev->asic_funcs->send_heartbeat(hdev))
+ goto reschedule;
+
+ dev_err(hdev->dev, "Device heartbeat failed!\n");
+ hl_device_reset(hdev, true, false);
+
+ return;
+
+reschedule:
+ schedule_delayed_work(&hdev->work_heartbeat,
+ usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+}
+
+/*
+ * device_late_init - do late initialization for the habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Perform initialization that either needs the device H/W queues to be
+ * active or must happen after the rest of the initialization is finished
+ */
+static int device_late_init(struct hl_device *hdev)
+{
+ int rc;
+
+ INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
+ hdev->high_pll = hdev->asic_prop.high_pll;
+
+ /* force setting to low frequency */
+ atomic_set(&hdev->curr_pll_profile, PLL_LOW);
+
+ if (hdev->pm_mng_profile == PM_AUTO)
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
+ else
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
+
+ if (hdev->asic_funcs->late_init) {
+ rc = hdev->asic_funcs->late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed late initialization for the H/W\n");
+ return rc;
+ }
+ }
+
+ schedule_delayed_work(&hdev->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+
+ if (hdev->heartbeat) {
+ INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
+ schedule_delayed_work(&hdev->work_heartbeat,
+ usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+ }
+
+ hdev->late_init_done = true;
+
+ return 0;
+}
+
+/*
+ * device_late_fini - finalize all that was done in device_late_init
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ */
+static void device_late_fini(struct hl_device *hdev)
+{
+ if (!hdev->late_init_done)
+ return;
+
+ cancel_delayed_work_sync(&hdev->work_freq);
+ if (hdev->heartbeat)
+ cancel_delayed_work_sync(&hdev->work_heartbeat);
+
+ if (hdev->asic_funcs->late_fini)
+ hdev->asic_funcs->late_fini(hdev);
+
+ hdev->late_init_done = false;
+}
+
+/*
+ * hl_device_set_frequency - set the frequency of the device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @freq: the new frequency value
+ *
+ * Change the frequency if needed.
+ * We allow setting the PLL to low only if there is no open user process.
+ * Returns 0 if no change was done, otherwise returns 1.
+ */
+int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ enum hl_pll_frequency old_freq =
+ (freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
+ int ret;
+
+ if (hdev->pm_mng_profile == PM_MANUAL)
+ return 0;
+
+ ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
+ if (ret == freq)
+ return 0;
+
+ /*
+ * in case we want to lower frequency, check if device is not
+ * opened. We must have a check here to workaround race condition with
+ * hl_device_open
+ */
+ if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
+ atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+ return 0;
+ }
+
+ dev_dbg(hdev->dev, "Changing device frequency to %s\n",
+ freq == PLL_HIGH ? "high" : "low");
+
+ hdev->asic_funcs->set_pll_profile(hdev, freq);
+
+ return 1;
+}
+
+/*
+ * hl_device_suspend - initiate device suspend
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int hl_device_suspend(struct hl_device *hdev)
+{
+ int rc;
+
+ pci_save_state(hdev->pdev);
+
+ rc = hdev->asic_funcs->suspend(hdev);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to disable PCI access of device CPU\n");
+
+ /* Shut down the device */
+ pci_disable_device(hdev->pdev);
+ pci_set_power_state(hdev->pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/*
+ * hl_device_resume - initiate device resume
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
+int hl_device_resume(struct hl_device *hdev)
+{
+ int rc;
+
+ pci_set_power_state(hdev->pdev, PCI_D0);
+ pci_restore_state(hdev->pdev);
+ rc = pci_enable_device(hdev->pdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to enable PCI device in resume\n");
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->resume(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to enable PCI access from device CPU\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void hl_device_hard_reset_pending(struct work_struct *work)
+{
+ struct hl_device_reset_work *device_reset_work =
+ container_of(work, struct hl_device_reset_work, reset_work);
+ struct hl_device *hdev = device_reset_work->hdev;
+ u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+ struct task_struct *task = NULL;
+
+ /* Flush all processes that are inside hl_open */
+ mutex_lock(&hdev->fd_open_cnt_lock);
+
+ while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+ pending_cnt--;
+
+ dev_info(hdev->dev,
+ "Can't HARD reset, waiting for user to close FD\n");
+ ssleep(1);
+ }
+
+ if (atomic_read(&hdev->fd_open_cnt)) {
+ task = get_pid_task(hdev->user_ctx->hpriv->taskpid,
+ PIDTYPE_PID);
+ if (task) {
+ dev_info(hdev->dev, "Killing user processes\n");
+ send_sig(SIGKILL, task, 1);
+ msleep(100);
+
+ put_task_struct(task);
+ }
+ }
+
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+
+ hl_device_reset(hdev, true, true);
+
+ kfree(device_reset_work);
+}
+
+/*
+ * hl_device_reset - reset the device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @hard_reset: should we do hard reset to all engines or just reset the
+ * compute/dma engines
+ *
+ * Block future CS and wait for pending CS to be enqueued
+ * Call ASIC H/W fini
+ * Flush all completions
+ * Re-initialize all internal data structures
+ * Call ASIC H/W init, late_init
+ * Test queues
+ * Enable device
+ *
+ * Returns 0 for success or an error on failure.
+ */
+int hl_device_reset(struct hl_device *hdev, bool hard_reset,
+ bool from_hard_reset_thread)
+{
+ int i, rc;
+
+ if (!hdev->init_done) {
+ dev_err(hdev->dev,
+ "Can't reset before initialization is done\n");
+ return 0;
+ }
+
+ /*
+ * Prevent concurrency in this function - only one reset should be
+ * done at any given time. Only need to perform this if we didn't
+ * get from the dedicated hard reset thread
+ */
+ if (!from_hard_reset_thread) {
+ /* Block future CS/VM/JOB completion operations */
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ if (rc)
+ return 0;
+
+ /* This also blocks future CS/VM/JOB completion operations */
+ hdev->disabled = true;
+
+ /*
+ * Flush anyone that is inside the critical section of enqueue
+ * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ dev_err(hdev->dev, "Going to RESET device!\n");
+ }
+
+again:
+ if ((hard_reset) && (!from_hard_reset_thread)) {
+ struct hl_device_reset_work *device_reset_work;
+
+ if (!hdev->pdev) {
+ dev_err(hdev->dev,
+ "Reset action is NOT supported in simulator\n");
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ hdev->hard_reset_pending = true;
+
+ device_reset_work = kzalloc(sizeof(*device_reset_work),
+ GFP_ATOMIC);
+ if (!device_reset_work) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
+ /*
+ * Because the reset function can't run from interrupt or
+ * from heartbeat work, we need to call the reset function
+ * from a dedicated work
+ */
+ INIT_WORK(&device_reset_work->reset_work,
+ hl_device_hard_reset_pending);
+ device_reset_work->hdev = hdev;
+ schedule_work(&device_reset_work->reset_work);
+
+ return 0;
+ }
+
+ if (hard_reset) {
+ device_late_fini(hdev);
+
+ /*
+ * Now that the heartbeat thread is closed, flush processes
+ * which are sending messages to CPU
+ */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+ }
+
+ /*
+ * Halt the engines and disable interrupts so we won't get any more
+ * completions from H/W and we won't have any accesses from the
+ * H/W to the host machine
+ */
+ hdev->asic_funcs->halt_engines(hdev, hard_reset);
+
+ /* Go over all the queues, release all CS and their jobs */
+ hl_cs_rollback_all(hdev);
+
+ if (hard_reset) {
+ /* Release kernel context */
+ if (hl_ctx_put(hdev->kernel_ctx) != 1) {
+ dev_err(hdev->dev,
+ "kernel ctx is alive during hard reset\n");
+ rc = -EBUSY;
+ goto out_err;
+ }
+
+ hdev->kernel_ctx = NULL;
+ }
+
+ /* Reset the H/W. It will be in idle state after this returns */
+ hdev->asic_funcs->hw_fini(hdev, hard_reset);
+
+ if (hard_reset) {
+ hl_vm_fini(hdev);
+ hl_eq_reset(hdev, &hdev->event_queue);
+ }
+
+ /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
+ hl_hw_queue_reset(hdev, hard_reset);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ hl_cq_reset(hdev, &hdev->completion_queue[i]);
+
+ /* Make sure the setup phase for the user context will run again */
+ if (hdev->user_ctx) {
+ atomic_set(&hdev->user_ctx->thread_restore_token, 1);
+ hdev->user_ctx->thread_restore_wait_token = 0;
+ }
+
+ /* Finished tear-down, starting to re-initialize */
+
+ if (hard_reset) {
+ hdev->device_cpu_disabled = false;
+
+ /* Allocate the kernel context */
+ hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
+ GFP_KERNEL);
+ if (!hdev->kernel_ctx) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
+ hdev->user_ctx = NULL;
+
+ rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to init kernel ctx in hard reset\n");
+ kfree(hdev->kernel_ctx);
+ hdev->kernel_ctx = NULL;
+ goto out_err;
+ }
+ }
+
+ rc = hdev->asic_funcs->hw_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to initialize the H/W after reset\n");
+ goto out_err;
+ }
+
+ hdev->disabled = false;
+
+ /* Check that the communication with the device is working */
+ rc = hdev->asic_funcs->test_queues(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to detect if device is alive after reset\n");
+ goto out_err;
+ }
+
+ if (hard_reset) {
+ rc = device_late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed late init after hard reset\n");
+ goto out_err;
+ }
+
+ rc = hl_vm_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to init memory module after hard reset\n");
+ goto out_err;
+ }
+
+ hl_set_max_power(hdev, hdev->max_power);
+
+ hdev->hard_reset_pending = false;
+ } else {
+ rc = hdev->asic_funcs->soft_reset_late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed late init after soft reset\n");
+ goto out_err;
+ }
+ }
+
+ atomic_set(&hdev->in_reset, 0);
+
+ if (hard_reset)
+ hdev->hard_reset_cnt++;
+ else
+ hdev->soft_reset_cnt++;
+
+ return 0;
+
+out_err:
+ hdev->disabled = true;
+
+ if (hard_reset) {
+ dev_err(hdev->dev,
+ "Failed to reset! Device is NOT usable\n");
+ hdev->hard_reset_cnt++;
+ } else {
+ dev_err(hdev->dev,
+ "Failed to do soft-reset, trying hard reset\n");
+ hdev->soft_reset_cnt++;
+ hard_reset = true;
+ goto again;
+ }
+
+ atomic_set(&hdev->in_reset, 0);
+
+ return rc;
+}
+
+/*
+ * hl_device_init - main initialization function for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Allocate an id for the device, do early initialization and then call the
+ * ASIC specific initialization functions. Finally, create the cdev and the
+ * Linux device to expose it to the user
+ */
+int hl_device_init(struct hl_device *hdev, struct class *hclass)
+{
+ int i, rc, cq_ready_cnt;
+
+ /* Create device */
+ rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops);
+
+ if (rc)
+ goto out_disabled;
+
+ /* Initialize ASIC function pointers and perform early init */
+ rc = device_early_init(hdev);
+ if (rc)
+ goto release_device;
+
+ /*
+ * Start calling ASIC initialization. First S/W then H/W and finally
+ * late init
+ */
+ rc = hdev->asic_funcs->sw_init(hdev);
+ if (rc)
+ goto early_fini;
+
+ /*
+ * Initialize the H/W queues. Must be done before hw_init, because
+ * there the addresses of the kernel queue are being written to the
+ * registers of the device
+ */
+ rc = hl_hw_queues_create(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize kernel queues\n");
+ goto sw_fini;
+ }
+
+ /*
+ * Initialize the completion queues. Must be done before hw_init,
+ * because there the addresses of the completion queues are being
+ * passed as arguments to request_irq
+ */
+ hdev->completion_queue =
+ kcalloc(hdev->asic_prop.completion_queues_count,
+ sizeof(*hdev->completion_queue), GFP_KERNEL);
+
+ if (!hdev->completion_queue) {
+ dev_err(hdev->dev, "failed to allocate completion queues\n");
+ rc = -ENOMEM;
+ goto hw_queues_destroy;
+ }
+
+ for (i = 0, cq_ready_cnt = 0;
+ i < hdev->asic_prop.completion_queues_count;
+ i++, cq_ready_cnt++) {
+ rc = hl_cq_init(hdev, &hdev->completion_queue[i], i);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to initialize completion queue\n");
+ goto cq_fini;
+ }
+ }
+
+ /*
+ * Initialize the event queue. Must be done before hw_init,
+ * because there the address of the event queue is being
+ * passed as argument to request_irq
+ */
+ rc = hl_eq_init(hdev, &hdev->event_queue);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize event queue\n");
+ goto cq_fini;
+ }
+
+ /* Allocate the kernel context */
+ hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
+ if (!hdev->kernel_ctx) {
+ rc = -ENOMEM;
+ goto eq_fini;
+ }
+
+ hdev->user_ctx = NULL;
+
+ rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize kernel context\n");
+ goto free_ctx;
+ }
+
+ rc = hl_cb_pool_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CB pool\n");
+ goto release_ctx;
+ }
+
+ rc = hl_sysfs_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize sysfs\n");
+ goto free_cb_pool;
+ }
+
+ hl_debugfs_add_device(hdev);
+
+ if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
+ rc = hdev->asic_funcs->hw_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize the H/W\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ hdev->disabled = false;
+
+ /* Check that the communication with the device is working */
+ rc = hdev->asic_funcs->test_queues(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to detect if device is alive\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ /* After test_queues, KMD can start sending messages to device CPU */
+
+ rc = device_late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed late initialization\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
+ hdev->asic_name,
+ hdev->asic_prop.dram_size / 1024 / 1024 / 1024);
+
+ rc = hl_vm_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize memory module\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ /*
+ * hl_hwmon_init must be called after device_late_init, because only
+ * there we get the information from the device about which
+ * hwmon-related sensors the device supports
+ */
+ rc = hl_hwmon_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize hwmon\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ dev_notice(hdev->dev,
+ "Successfully added device to habanalabs driver\n");
+
+ hdev->init_done = true;
+
+ return 0;
+
+free_cb_pool:
+ hl_cb_pool_fini(hdev);
+release_ctx:
+ if (hl_ctx_put(hdev->kernel_ctx) != 1)
+ dev_err(hdev->dev,
+ "kernel ctx is still alive on initialization failure\n");
+free_ctx:
+ kfree(hdev->kernel_ctx);
+eq_fini:
+ hl_eq_fini(hdev, &hdev->event_queue);
+cq_fini:
+ for (i = 0 ; i < cq_ready_cnt ; i++)
+ hl_cq_fini(hdev, &hdev->completion_queue[i]);
+ kfree(hdev->completion_queue);
+hw_queues_destroy:
+ hl_hw_queues_destroy(hdev);
+sw_fini:
+ hdev->asic_funcs->sw_fini(hdev);
+early_fini:
+ device_early_fini(hdev);
+release_device:
+ device_destroy(hclass, hdev->dev->devt);
+ cdev_del(&hdev->cdev);
+out_disabled:
+ hdev->disabled = true;
+ if (hdev->pdev)
+ dev_err(&hdev->pdev->dev,
+ "Failed to initialize hl%d. Device is NOT usable !\n",
+ hdev->id);
+ else
+ pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
+ hdev->id);
+
+ return rc;
+}
+
+/*
+ * hl_device_fini - main tear-down function for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Destroy the device, call ASIC fini functions and release the id
+ */
+void hl_device_fini(struct hl_device *hdev)
+{
+ int i, rc;
+ ktime_t timeout;
+
+ dev_info(hdev->dev, "Removing device\n");
+
+ /*
+ * This function is competing with the reset function, so try to
+ * take the reset atomic and if we are already in middle of reset,
+ * wait until reset function is finished. Reset function is designed
+ * to always finish (could take up to a few seconds in worst case).
+ */
+
+ timeout = ktime_add_us(ktime_get(),
+ HL_PENDING_RESET_PER_SEC * 1000 * 1000 * 4);
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ while (rc) {
+ usleep_range(50, 200);
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ WARN(1, "Failed to remove device because reset function did not finish\n");
+ return;
+ }
+ }
+
+ /* Mark device as disabled */
+ hdev->disabled = true;
+
+ hl_hwmon_fini(hdev);
+
+ device_late_fini(hdev);
+
+ hl_debugfs_remove_device(hdev);
+
+ hl_sysfs_fini(hdev);
+
+ /*
+ * Halt the engines and disable interrupts so we won't get any more
+ * completions from H/W and we won't have any accesses from the
+ * H/W to the host machine
+ */
+ hdev->asic_funcs->halt_engines(hdev, true);
+
+ /* Go over all the queues, release all CS and their jobs */
+ hl_cs_rollback_all(hdev);
+
+ hl_cb_pool_fini(hdev);
+
+ /* Release kernel context */
+ if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
+ dev_err(hdev->dev, "kernel ctx is still alive\n");
+
+ /* Reset the H/W. It will be in idle state after this returns */
+ hdev->asic_funcs->hw_fini(hdev, true);
+
+ hl_vm_fini(hdev);
+
+ hl_eq_fini(hdev, &hdev->event_queue);
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ hl_cq_fini(hdev, &hdev->completion_queue[i]);
+ kfree(hdev->completion_queue);
+
+ hl_hw_queues_destroy(hdev);
+
+ /* Call ASIC S/W finalize function */
+ hdev->asic_funcs->sw_fini(hdev);
+
+ device_early_fini(hdev);
+
+ /* Hide device from user */
+ device_destroy(hdev->dev->class, hdev->dev->devt);
+ cdev_del(&hdev->cdev);
+
+ pr_info("removed device successfully\n");
+}
+
+/*
+ * hl_poll_timeout_memory - Periodically poll a host memory address
+ * until it is not zero or a timeout occurs
+ * @hdev: pointer to habanalabs device structure
+ * @addr: Address to poll
+ * @timeout_us: timeout in us
+ * @val: Variable to read the value into
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context because it sleeps.
+ *
+ * The function sleeps for up to 100us between polls, until the value
+ * is non-zero or timeout_us has elapsed
+ */
+int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr,
+ u32 timeout_us, u32 *val)
+{
+ /*
+ * the address in this function always points to a memory location in the
+ * host's (server's) memory. That location is updated asynchronously,
+ * either by direct access from the device or by another core
+ */
+ u32 *paddr = (u32 *) (uintptr_t) addr;
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ might_sleep();
+
+ for (;;) {
+ /*
+ * Flush CPU read/write buffers to make sure we read updates
+ * done by other cores or by the device
+ */
+ mb();
+ *val = *paddr;
+ if (*val)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ *val = *paddr;
+ break;
+ }
+ usleep_range((100 >> 2) + 1, 100);
+ }
+
+ return *val ? 0 : -ETIMEDOUT;
+}
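+
+/*
+ * Illustrative sketch, not part of the driver: a caller polling a host
+ * memory location that the device is expected to write. The variable, the
+ * timeout and the warning text are hypothetical:
+ *
+ *   u32 status;
+ *   int rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &fence_flag,
+ *                                   1000000, &status);
+ *   if (rc == -ETIMEDOUT)
+ *           dev_warn(hdev->dev, "fence was not signaled in time\n");
+ */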
+
+/*
+ * hl_poll_timeout_device_memory - Periodically poll a device memory address
+ * until it is not zero or a timeout occurs
+ * @hdev: pointer to habanalabs device structure
+ * @addr: Device address to poll
+ * @timeout_us: timeout in us
+ * @val: Variable to read the value into
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context because it sleeps.
+ *
+ * The function sleeps for up to 100us between polls, until the value
+ * is non-zero or timeout_us has elapsed
+ */
+int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
+ u32 timeout_us, u32 *val)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ might_sleep();
+
+ for (;;) {
+ *val = readl(addr);
+ if (*val)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ *val = readl(addr);
+ break;
+ }
+ usleep_range((100 >> 2) + 1, 100);
+ }
+
+ return *val ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * MMIO register access helper functions.
+ */
+
+/*
+ * hl_rreg - Read an MMIO register
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @reg: MMIO register offset (in bytes)
+ *
+ * Returns the value of the MMIO register we are asked to read
+ *
+ */
+inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
+{
+ return readl(hdev->rmmio + reg);
+}
+
+/*
+ * hl_wreg - Write to an MMIO register
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @reg: MMIO register offset (in bytes)
+ * @val: 32-bit value
+ *
+ * Writes the 32-bit value into the MMIO register
+ *
+ */
+inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
+{
+ writel(val, hdev->rmmio + reg);
+}
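+
+/*
+ * Illustrative sketch, not part of the driver: a read-modify-write of an
+ * MMIO register via the helpers above (the register offsets are
+ * hypothetical):
+ *
+ *   u32 sts = hl_rreg(hdev, some_status_reg_offset);
+ *   hl_wreg(hdev, some_ctrl_reg_offset, sts | BIT(0));
+ */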
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
new file mode 100644
index 000000000000..e458e5ba500b
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -0,0 +1,3 @@
+subdir-ccflags-y += -I$(src)
+
+HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
new file mode 100644
index 000000000000..238dd57c541b
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -0,0 +1,5391 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+#include "include/hw_ip/mmu/mmu_v1_0.h"
+#include "include/goya/asic_reg/goya_masks.h"
+
+#include <linux/pci.h>
+#include <linux/genalloc.h>
+#include <linux/firmware.h>
+#include <linux/hwmon.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+
+/*
+ * GOYA security scheme:
+ *
+ * 1. Host is protected by:
+ * - Range registers (When MMU is enabled, DMA RR does NOT protect host)
+ * - MMU
+ *
+ * 2. DRAM is protected by:
+ * - Range registers (protect the first 512MB)
+ * - MMU (isolation between users)
+ *
+ * 3. Configuration is protected by:
+ * - Range registers
+ * - Protection bits
+ *
+ * When MMU is disabled:
+ *
+ * QMAN DMA: PQ, CQ, CP, DMA are secured.
+ * PQ, CB and the data are on the host.
+ *
+ * QMAN TPC/MME:
+ * PQ, CQ and CP are not secured.
+ * PQ, CB and the data are on the SRAM/DRAM.
+ *
+ * Since QMAN DMA is secured, KMD parses the DMA CB:
+ * - KMD checks DMA pointer
+ * - WREG, MSG_PROT are not allowed.
+ * - MSG_LONG/SHORT are allowed.
+ *
+ * A read/write transaction by the QMAN to a protected area will succeed if
+ * and only if the QMAN's CP is secured and MSG_PROT is used
+ *
+ *
+ * When MMU is enabled:
+ *
+ * QMAN DMA: PQ, CQ and CP are secured.
+ * MMU is set to bypass on the Secure props register of the QMAN.
+ * The reasons we don't enable MMU for PQ, CQ and CP are:
+ * - PQ entry is in kernel address space and KMD doesn't map it.
+ * - CP writes to MSIX register and to kernel address space (completion
+ * queue).
+ *
+ * DMA is not secured, but because CP is secured, KMD still needs to parse
+ * the CB. However, it doesn't need to check the DMA addresses.
+ *
+ * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
+ * doesn't map memory in MMU.
+ *
+ * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
+ *
+ * DMA RR does NOT protect host because DMA is not secured
+ *
+ */
+
+#define GOYA_MMU_REGS_NUM 61
+
+#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
+
+#define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
+#define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
+#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
+#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
+#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
+#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
+#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
+#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
+#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+
+#define GOYA_QMAN0_FENCE_VAL 0xD169B243
+
+#define GOYA_MAX_INITIATORS 20
+
+#define GOYA_MAX_STRING_LEN 20
+
+#define GOYA_CB_POOL_CB_CNT 512
+#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
+
+static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
+ "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
+ "goya cq 4", "goya cpu eq"
+};
+
+static u16 goya_packet_sizes[MAX_PACKET_ID] = {
+ [PACKET_WREG_32] = sizeof(struct packet_wreg32),
+ [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
+ [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
+ [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
+ [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
+ [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
+ [PACKET_FENCE] = sizeof(struct packet_fence),
+ [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
+ [PACKET_NOP] = sizeof(struct packet_nop),
+ [PACKET_STOP] = sizeof(struct packet_stop)
+};
+
+static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
+ mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
+ mmTPC0_QM_GLBL_SECURE_PROPS,
+ mmTPC0_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC0_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC0_CFG_ARUSER,
+ mmTPC0_CFG_AWUSER,
+ mmTPC1_QM_GLBL_SECURE_PROPS,
+ mmTPC1_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC1_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC1_CFG_ARUSER,
+ mmTPC1_CFG_AWUSER,
+ mmTPC2_QM_GLBL_SECURE_PROPS,
+ mmTPC2_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC2_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC2_CFG_ARUSER,
+ mmTPC2_CFG_AWUSER,
+ mmTPC3_QM_GLBL_SECURE_PROPS,
+ mmTPC3_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC3_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC3_CFG_ARUSER,
+ mmTPC3_CFG_AWUSER,
+ mmTPC4_QM_GLBL_SECURE_PROPS,
+ mmTPC4_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC4_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC4_CFG_ARUSER,
+ mmTPC4_CFG_AWUSER,
+ mmTPC5_QM_GLBL_SECURE_PROPS,
+ mmTPC5_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC5_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC5_CFG_ARUSER,
+ mmTPC5_CFG_AWUSER,
+ mmTPC6_QM_GLBL_SECURE_PROPS,
+ mmTPC6_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC6_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC6_CFG_ARUSER,
+ mmTPC6_CFG_AWUSER,
+ mmTPC7_QM_GLBL_SECURE_PROPS,
+ mmTPC7_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC7_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC7_CFG_ARUSER,
+ mmTPC7_CFG_AWUSER,
+ mmMME_QM_GLBL_SECURE_PROPS,
+ mmMME_QM_GLBL_NON_SECURE_PROPS,
+ mmMME_CMDQ_GLBL_SECURE_PROPS,
+ mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmMME_SBA_CONTROL_DATA,
+ mmMME_SBB_CONTROL_DATA,
+ mmMME_SBC_CONTROL_DATA,
+ mmMME_WBC_CONTROL_DATA
+};
+
+#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121
+
+static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
+ GOYA_ASYNC_EVENT_ID_PCIE_IF,
+ GOYA_ASYNC_EVENT_ID_TPC0_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC1_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC2_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC3_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC4_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC5_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC6_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC7_ECC,
+ GOYA_ASYNC_EVENT_ID_MME_ECC,
+ GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
+ GOYA_ASYNC_EVENT_ID_MMU_ECC,
+ GOYA_ASYNC_EVENT_ID_DMA_MACRO,
+ GOYA_ASYNC_EVENT_ID_DMA_ECC,
+ GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
+ GOYA_ASYNC_EVENT_ID_PSOC_MEM,
+ GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
+ GOYA_ASYNC_EVENT_ID_SRAM0,
+ GOYA_ASYNC_EVENT_ID_SRAM1,
+ GOYA_ASYNC_EVENT_ID_SRAM2,
+ GOYA_ASYNC_EVENT_ID_SRAM3,
+ GOYA_ASYNC_EVENT_ID_SRAM4,
+ GOYA_ASYNC_EVENT_ID_SRAM5,
+ GOYA_ASYNC_EVENT_ID_SRAM6,
+ GOYA_ASYNC_EVENT_ID_SRAM7,
+ GOYA_ASYNC_EVENT_ID_SRAM8,
+ GOYA_ASYNC_EVENT_ID_SRAM9,
+ GOYA_ASYNC_EVENT_ID_SRAM10,
+ GOYA_ASYNC_EVENT_ID_SRAM11,
+ GOYA_ASYNC_EVENT_ID_SRAM12,
+ GOYA_ASYNC_EVENT_ID_SRAM13,
+ GOYA_ASYNC_EVENT_ID_SRAM14,
+ GOYA_ASYNC_EVENT_ID_SRAM15,
+ GOYA_ASYNC_EVENT_ID_SRAM16,
+ GOYA_ASYNC_EVENT_ID_SRAM17,
+ GOYA_ASYNC_EVENT_ID_SRAM18,
+ GOYA_ASYNC_EVENT_ID_SRAM19,
+ GOYA_ASYNC_EVENT_ID_SRAM20,
+ GOYA_ASYNC_EVENT_ID_SRAM21,
+ GOYA_ASYNC_EVENT_ID_SRAM22,
+ GOYA_ASYNC_EVENT_ID_SRAM23,
+ GOYA_ASYNC_EVENT_ID_SRAM24,
+ GOYA_ASYNC_EVENT_ID_SRAM25,
+ GOYA_ASYNC_EVENT_ID_SRAM26,
+ GOYA_ASYNC_EVENT_ID_SRAM27,
+ GOYA_ASYNC_EVENT_ID_SRAM28,
+ GOYA_ASYNC_EVENT_ID_SRAM29,
+ GOYA_ASYNC_EVENT_ID_GIC500,
+ GOYA_ASYNC_EVENT_ID_PLL0,
+ GOYA_ASYNC_EVENT_ID_PLL1,
+ GOYA_ASYNC_EVENT_ID_PLL3,
+ GOYA_ASYNC_EVENT_ID_PLL4,
+ GOYA_ASYNC_EVENT_ID_PLL5,
+ GOYA_ASYNC_EVENT_ID_PLL6,
+ GOYA_ASYNC_EVENT_ID_AXI_ECC,
+ GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
+ GOYA_ASYNC_EVENT_ID_PCIE_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC0_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC1_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC2_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC3_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC4_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC5_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC6_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC7_DEC,
+ GOYA_ASYNC_EVENT_ID_MME_WACS,
+ GOYA_ASYNC_EVENT_ID_MME_WACSD,
+ GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
+ GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
+ GOYA_ASYNC_EVENT_ID_PSOC,
+ GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC0_QM,
+ GOYA_ASYNC_EVENT_ID_TPC1_QM,
+ GOYA_ASYNC_EVENT_ID_TPC2_QM,
+ GOYA_ASYNC_EVENT_ID_TPC3_QM,
+ GOYA_ASYNC_EVENT_ID_TPC4_QM,
+ GOYA_ASYNC_EVENT_ID_TPC5_QM,
+ GOYA_ASYNC_EVENT_ID_TPC6_QM,
+ GOYA_ASYNC_EVENT_ID_TPC7_QM,
+ GOYA_ASYNC_EVENT_ID_MME_QM,
+ GOYA_ASYNC_EVENT_ID_MME_CMDQ,
+ GOYA_ASYNC_EVENT_ID_DMA0_QM,
+ GOYA_ASYNC_EVENT_ID_DMA1_QM,
+ GOYA_ASYNC_EVENT_ID_DMA2_QM,
+ GOYA_ASYNC_EVENT_ID_DMA3_QM,
+ GOYA_ASYNC_EVENT_ID_DMA4_QM,
+ GOYA_ASYNC_EVENT_ID_DMA0_CH,
+ GOYA_ASYNC_EVENT_ID_DMA1_CH,
+ GOYA_ASYNC_EVENT_ID_DMA2_CH,
+ GOYA_ASYNC_EVENT_ID_DMA3_CH,
+ GOYA_ASYNC_EVENT_ID_DMA4_CH,
+ GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
+};
+
+static int goya_armcp_info_get(struct hl_device *hdev);
+static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
+static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
+static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
+static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+ u64 phys_addr);
+
+static void goya_get_fixed_properties(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int i;
+
+ for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
+ prop->hw_queues_props[i].kmd_only = 0;
+ }
+
+ for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
+ prop->hw_queues_props[i].kmd_only = 1;
+ }
+
+ for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
+ NUMBER_OF_INT_HW_QUEUES; i++) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
+ prop->hw_queues_props[i].kmd_only = 0;
+ }
+
+ for (; i < HL_MAX_QUEUES; i++)
+ prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
+
+ prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
+
+ prop->dram_base_address = DRAM_PHYS_BASE;
+ prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
+ prop->dram_end_address = prop->dram_base_address + prop->dram_size;
+ prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
+
+ prop->sram_base_address = SRAM_BASE_ADDR;
+ prop->sram_size = SRAM_SIZE;
+ prop->sram_end_address = prop->sram_base_address + prop->sram_size;
+ prop->sram_user_base_address = prop->sram_base_address +
+ SRAM_USER_BASE_OFFSET;
+
+ prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
+ prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
+ if (hdev->pldm)
+ prop->mmu_pgt_size = 0x800000; /* 8MB */
+ else
+ prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
+ prop->mmu_pte_size = HL_PTE_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE;
+ prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->dram_page_size = PAGE_SIZE_2MB;
+
+ prop->host_phys_base_address = HOST_PHYS_BASE;
+ prop->va_space_host_start_address = VA_HOST_SPACE_START;
+ prop->va_space_host_end_address = VA_HOST_SPACE_END;
+ prop->va_space_dram_start_address = VA_DDR_SPACE_START;
+ prop->va_space_dram_end_address = VA_DDR_SPACE_END;
+ prop->dram_size_for_default_page_mapping =
+ prop->va_space_dram_end_address;
+ prop->cfg_size = CFG_SIZE;
+ prop->max_asid = MAX_ASID;
+ prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
+ prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
+ prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
+ prop->max_power_default = MAX_POWER_DEFAULT;
+ prop->tpc_enabled_mask = TPC_ENABLED_MASK;
+
+ prop->high_pll = PLL_HIGH_DEFAULT;
+}
+
+int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
+{
+ struct armcp_packet pkt;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
+ sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+}
+
+/*
+ * goya_pci_bars_map - Map PCI BARS of Goya device
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Request PCI regions and map them to kernel virtual addresses.
+ * Returns 0 on success
+ *
+ */
+static int goya_pci_bars_map(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int rc;
+
+ rc = pci_request_regions(pdev, HL_NAME);
+ if (rc) {
+ dev_err(hdev->dev, "Cannot obtain PCI resources\n");
+ return rc;
+ }
+
+ hdev->pcie_bar[SRAM_CFG_BAR_ID] =
+ pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
+ if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
+ dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
+ rc = -ENODEV;
+ goto err_release_regions;
+ }
+
+ hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
+ if (!hdev->pcie_bar[MSIX_BAR_ID]) {
+ dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
+ rc = -ENODEV;
+ goto err_unmap_sram_cfg;
+ }
+
+ hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
+ if (!hdev->pcie_bar[DDR_BAR_ID]) {
+ dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
+ rc = -ENODEV;
+ goto err_unmap_msix;
+ }
+
+ hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (CFG_BASE - SRAM_BASE_ADDR);
+
+ return 0;
+
+err_unmap_msix:
+ iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
+err_unmap_sram_cfg:
+ iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
+err_release_regions:
+ pci_release_regions(pdev);
+
+ return rc;
+}
+
+/*
+ * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Release all PCI BARS and unmap their virtual addresses
+ *
+ */
+static void goya_pci_bars_unmap(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ iounmap(hdev->pcie_bar[DDR_BAR_ID]);
+ iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
+ iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
+ pci_release_regions(pdev);
+}
+
+/*
+ * goya_elbi_write - Write through the ELBI interface
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Return 0 on success, -EIO on failure
+ *
+ */
+static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ ktime_t timeout;
+ u32 val;
+
+ /* Clear previous status */
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
+
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
+ PCI_CONFIG_ELBI_CTRL_WRITE);
+
+ timeout = ktime_add_ms(ktime_get(), 10);
+ for (;;) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
+ if (val & PCI_CONFIG_ELBI_STS_MASK)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
+ &val);
+ break;
+ }
+ usleep_range(300, 500);
+ }
+
+ if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
+ return 0;
+
+ if (val & PCI_CONFIG_ELBI_STS_ERR) {
+ dev_err(hdev->dev, "Error writing to ELBI\n");
+ return -EIO;
+ }
+
+ if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
+ dev_err(hdev->dev, "ELBI write didn't finish in time\n");
+ return -EIO;
+ }
+
+ dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
+ return -EIO;
+}
+
+/*
+ * goya_iatu_write - iatu write routine
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
+{
+ u32 dbi_offset;
+ int rc;
+
+ dbi_offset = addr & 0xFFF;
+
+ rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
+ rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);
+
+ if (rc)
+ return -EIO;
+
+ return 0;
+}
+
+static void goya_reset_link_through_bridge(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct pci_dev *parent_port;
+ u16 val;
+
+ parent_port = pdev->bus->self;
+ pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
+ val |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+ ssleep(1);
+
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
+ pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+ ssleep(3);
+}
+
+/*
+ * goya_set_ddr_bar_base - set DDR bar to map specific device address
+ *
+ * @hdev: pointer to hl_device structure
+ * @addr: address in DDR. Must be aligned to DDR bar size
+ *
+ * This function configures the iATU so that the DDR bar will start at the
+ * specified addr.
+ *
+ */
+static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+
+ if ((goya) && (goya->ddr_bar_cur_addr == addr))
+ return 0;
+
+ /* Inbound Region 1 - Bar 4 - Point to DDR */
+ rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
+ rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
+ rc |= goya_iatu_write(hdev, 0x300, 0);
+ /* Enable + Bar match + match enable + Bar 4 */
+ rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);
+
+ /* Return the DBI window to the default location */
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
+ return -EIO;
+ }
+
+ if (goya)
+ goya->ddr_bar_cur_addr = addr;
+
+ return 0;
+}
+
+/*
+ * goya_init_iatu - Initialize the iATU unit inside the PCI controller
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * This is needed in case the firmware doesn't initialize the iATU
+ *
+ */
+static int goya_init_iatu(struct hl_device *hdev)
+{
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
+ rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
+ rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
+ rc |= goya_iatu_write(hdev, 0x100, 0);
+ /* Enable + Bar match + match enable */
+ rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);
+
+ /* Inbound Region 1 - Bar 4 - Point to DDR */
+ rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
+
+ /* Outbound Region 0 - Point to Host */
+ rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
+ rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
+ rc |= goya_iatu_write(hdev, 0x010,
+ lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
+ rc |= goya_iatu_write(hdev, 0x014, 0);
+ rc |= goya_iatu_write(hdev, 0x018, 0);
+ rc |= goya_iatu_write(hdev, 0x020,
+ upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
+ /* Increase region size */
+ rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
+ /* Enable */
+ rc |= goya_iatu_write(hdev, 0x004, 0x80000000);
+
+ /* Return the DBI window to the default location */
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
+
+ if (rc)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * goya_early_init - GOYA early initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Verify PCI bars
+ * Set DMA masks
+ * PCI controller initialization
+ * Map PCI bars
+ *
+ */
+static int goya_early_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_dev *pdev = hdev->pdev;
+ u32 val;
+ int rc;
+
+ goya_get_fixed_properties(hdev);
+
+ /* Check BAR sizes */
+ if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ SRAM_CFG_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ SRAM_CFG_BAR_ID),
+ CFG_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ MSIX_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ MSIX_BAR_ID),
+ MSIX_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
+
+ /* Prefer the device's 39-bit DMA mask; fall back to 32 bits if needed */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (rc) {
+ dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci dma mask to 32 bits\n");
+ return rc;
+ }
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (rc) {
+ dev_warn(hdev->dev,
+ "Unable to set pci consistent dma mask to 39 bits\n");
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci consistent dma mask to 32 bits\n");
+ return rc;
+ }
+ }
+
+ if (hdev->reset_pcilink)
+ goya_reset_link_through_bridge(hdev);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(hdev->dev, "can't enable PCI device\n");
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ rc = goya_init_iatu(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize iATU\n");
+ goto disable_device;
+ }
+
+ rc = goya_pci_bars_map(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
+ goto disable_device;
+ }
+
+ if (!hdev->pldm) {
+ val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
+ if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
+ dev_warn(hdev->dev,
+ "PCI strap is not configured correctly, PCI bus errors may occur\n");
+ }
+
+ return 0;
+
+disable_device:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+/*
+ * goya_early_fini - GOYA early finalization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Unmap PCI bars
+ *
+ */
+static int goya_early_fini(struct hl_device *hdev)
+{
+ goya_pci_bars_unmap(hdev);
+
+ pci_clear_master(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+
+ return 0;
+}
+
+/*
+ * goya_fetch_psoc_frequency - Fetch PSOC frequency values
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static void goya_fetch_psoc_frequency(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
+ prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
+ prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
+ prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+}
+
+/*
+ * goya_late_init - GOYA late initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Get ArmCP info and send message to CPU to enable PCI access
+ */
+static int goya_late_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+
+ rc = goya->armcp_info_get(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get armcp info\n");
+ return rc;
+ }
+
+ /* Now that we have the DRAM size in ASIC prop, use it to configure
+ * the DMA_IF DDR wrap protection (which is in the MMU block).
+ * The value written is the log2 of the DRAM size.
+ */
+ WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
+
+ rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ return rc;
+ }
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
+
+ goya_fetch_psoc_frequency(hdev);
+
+ rc = goya_mmu_clear_pgt_range(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
+ goto disable_pci_access;
+ }
+
+ rc = goya_mmu_set_dram_default_page(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to set DRAM default page\n");
+ goto disable_pci_access;
+ }
+
+ return 0;
+
+disable_pci_access:
+ goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+
+ return rc;
+}
+
+/*
+ * goya_late_fini - GOYA late tear-down code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Free sensors allocated structures
+ */
+void goya_late_fini(struct hl_device *hdev)
+{
+ const struct hwmon_channel_info **channel_info_arr;
+ int i = 0;
+
+ if (!hdev->hl_chip_info->info)
+ return;
+
+ channel_info_arr = hdev->hl_chip_info->info;
+
+ while (channel_info_arr[i]) {
+ kfree(channel_info_arr[i]->config);
+ kfree(channel_info_arr[i]);
+ i++;
+ }
+
+ kfree(channel_info_arr);
+
+ hdev->hl_chip_info->info = NULL;
+}
+
+/*
+ * goya_sw_init - Goya software initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static int goya_sw_init(struct hl_device *hdev)
+{
+ struct goya_device *goya;
+ int rc;
+
+ /* Allocate device structure */
+ goya = kzalloc(sizeof(*goya), GFP_KERNEL);
+ if (!goya)
+ return -ENOMEM;
+
+ goya->test_cpu_queue = goya_test_cpu_queue;
+ goya->armcp_info_get = goya_armcp_info_get;
+
+ /* Initial value matches the mapping configured by goya_init_iatu() */
+ goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
+
+ goya->mme_clk = GOYA_PLL_FREQ_LOW;
+ goya->tpc_clk = GOYA_PLL_FREQ_LOW;
+ goya->ic_clk = GOYA_PLL_FREQ_LOW;
+
+ hdev->asic_specific = goya;
+
+ /* Create DMA pool for small allocations */
+ hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
+ &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
+ if (!hdev->dma_pool) {
+ dev_err(hdev->dev, "failed to create DMA pool\n");
+ rc = -ENOMEM;
+ goto free_goya_device;
+ }
+
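+ /* Allocate the coherent memory region that the device CPU can access */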
+ hdev->cpu_accessible_dma_mem =
+ hdev->asic_funcs->dma_alloc_coherent(hdev,
+ CPU_ACCESSIBLE_MEM_SIZE,
+ &hdev->cpu_accessible_dma_address,
+ GFP_KERNEL | __GFP_ZERO);
+
+ if (!hdev->cpu_accessible_dma_mem) {
+ dev_err(hdev->dev,
+ "failed to allocate %d of dma memory for CPU accessible memory space\n",
+ CPU_ACCESSIBLE_MEM_SIZE);
+ rc = -ENOMEM;
+ goto free_dma_pool;
+ }
+
+ hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
+ if (!hdev->cpu_accessible_dma_pool) {
+ dev_err(hdev->dev,
+ "Failed to create CPU accessible DMA pool\n");
+ rc = -ENOMEM;
+ goto free_cpu_pq_dma_mem;
+ }
+
+ rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
+ (uintptr_t) hdev->cpu_accessible_dma_mem,
+ CPU_ACCESSIBLE_MEM_SIZE, -1);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to CPU accessible DMA pool\n");
+ rc = -EFAULT;
+ goto free_cpu_pq_pool;
+ }
+
+ spin_lock_init(&goya->hw_queues_lock);
+
+ return 0;
+
+free_cpu_pq_pool:
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+free_cpu_pq_dma_mem:
+ hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+free_dma_pool:
+ dma_pool_destroy(hdev->dma_pool);
+free_goya_device:
+ kfree(goya);
+
+ return rc;
+}
+
+/*
+ * goya_sw_fini - Goya software tear-down code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static int goya_sw_fini(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+
+ hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+
+ dma_pool_destroy(hdev->dma_pool);
+
+ kfree(goya);
+
+ return 0;
+}
+
+static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
+ dma_addr_t bus_address)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
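+ /*
+ * Configure the PQ base address/size and the completion/error message
+ * destinations for this DMA QMAN
+ */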
+ WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
+ WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
+
+ WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
+ WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
+ WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
+
+ WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
+ WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
+ WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
+ WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
+ WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
+ WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
+
+ /* PQ has buffer of 2 cache lines, while CQ has 8 lines */
+ WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
+ WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
+
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
+ WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
+ else
+ WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
+
+ WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
+ WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
+}
+
+static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
+{
+ u32 gic_base_lo, gic_base_hi;
+ u64 sob_addr;
+ u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
+ WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
+
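+ /*
+ * Each DMA channel signals write completion to its own sync object:
+ * the KMD channel (0) uses SOB 1007, user channels use SOB 1000 and up
+ */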
+ if (dma_id)
+ sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
+ (dma_id - 1) * 4;
+ else
+ sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
+
+ WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
+ WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
+ WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
+}
+
+/*
+ * goya_init_dma_qmans - Initialize QMAN DMA registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the H/W registers of the QMAN DMA channels
+ *
+ */
+static void goya_init_dma_qmans(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct hl_hw_queue *q;
+ dma_addr_t bus_address;
+ int i;
+
+ if (goya->hw_cap_initialized & HW_CAP_DMA)
+ return;
+
+ q = &hdev->kernel_queues[0];
+
+ for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
+ bus_address = q->bus_address +
+ hdev->asic_prop.host_phys_base_address;
+
+ goya_init_dma_qman(hdev, i, bus_address);
+ goya_init_dma_ch(hdev, i);
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_DMA;
+}
+
+/*
+ * goya_disable_external_queues - Disable external queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static void goya_disable_external_queues(struct hl_device *hdev)
+{
+ WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
+}
+
+static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
+ u32 cp_sts_reg, u32 glbl_sts0_reg)
+{
+ int rc;
+ u32 status;
+
+ /* Use the TPC0 mask/shift values, as they are identical for all queues */
+
+ WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+
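+ /* If a fence is in progress, wait for it to complete before polling for the stop indication */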
+ status = RREG32(cp_sts_reg);
+ if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
+ rc = hl_poll_timeout(
+ hdev,
+ cp_sts_reg,
+ status,
+ !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
+ 1000,
+ QMAN_FENCE_TIMEOUT_USEC);
+
+ /* if QMAN is stuck in fence no need to check for stop */
+ if (rc)
+ return 0;
+ }
+
+ rc = hl_poll_timeout(
+ hdev,
+ glbl_sts0_reg,
+ status,
+ (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
+ 1000,
+ QMAN_STOP_TIMEOUT_USEC);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for QMAN to stop\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * goya_stop_external_queues - Stop external queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_stop_external_queues(struct hl_device *hdev)
+{
+ int rc, retval = 0;
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_0_GLBL_CFG1,
+ mmDMA_QM_0_CP_STS,
+ mmDMA_QM_0_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_1_GLBL_CFG1,
+ mmDMA_QM_1_CP_STS,
+ mmDMA_QM_1_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_2_GLBL_CFG1,
+ mmDMA_QM_2_CP_STS,
+ mmDMA_QM_2_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_3_GLBL_CFG1,
+ mmDMA_QM_3_CP_STS,
+ mmDMA_QM_3_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_4_GLBL_CFG1,
+ mmDMA_QM_4_CP_STS,
+ mmDMA_QM_4_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
+ retval = -EIO;
+ }
+
+ return retval;
+}
+
+static void goya_resume_external_queues(struct hl_device *hdev)
+{
+ WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
+}
+
+/*
+ * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_init_cpu_queues(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct hl_eq *eq;
+ dma_addr_t bus_address;
+ u32 status;
+ struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
+ int err;
+
+ if (!hdev->cpu_queues_enable)
+ return 0;
+
+ if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
+ return 0;
+
+ eq = &hdev->event_queue;
+
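+ /*
+ * Pass the PQ, EQ and CPU-accessible memory addresses and sizes to
+ * the embedded CPU through the scratchpad registers
+ */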
+ bus_address = cpu_pq->bus_address +
+ hdev->asic_prop.host_phys_base_address;
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));
+
+ bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));
+
+ bus_address = hdev->cpu_accessible_dma_address +
+ hdev->asic_prop.host_phys_base_address;
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));
+
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);
+
+ /* Used for EQ CI */
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
+
+ WREG32(mmCPU_IF_PF_PQ_PI, 0);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_PI_UPDATE);
+
+ err = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
+ status,
+ (status == PQ_INIT_STATUS_READY_FOR_HOST),
+ 1000,
+ GOYA_CPU_TIMEOUT_USEC);
+
+ if (err) {
+ dev_err(hdev->dev,
+ "Failed to communicate with ARM CPU (ArmCP timeout)\n");
+ return -EIO;
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_CPU_Q;
+ return 0;
+}
+
+static void goya_set_pll_refclk(struct hl_device *hdev)
+{
+ WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
+}
+
+static void goya_disable_clk_rlx(struct hl_device *hdev)
+{
+ WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
+ WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
+}
+
+static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
+{
+ u64 tpc_eml_address;
+ u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
+ int err, slm_index;
+
+ tpc_offset = tpc_id * 0x40000;
+ tpc_eml_offset = tpc_id * 0x200000;
+ tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
+ tpc_slm_offset = tpc_eml_address + 0x100000;
+
+ /*
+ * Workaround for Bug H2 #2443:
+ * "TPC SB is not initialized on chip reset"
+ */
+
+ val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
+ if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
+ dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
+ tpc_id);
+
+ WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
+
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
+
+ WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
+ 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
+
+ err = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
+ val,
+ (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
+ 1000,
+ HL_DEVICE_TIMEOUT_USEC);
+
+ if (err)
+ dev_err(hdev->dev,
+ "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
+
+ WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
+ 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
+
+ msleep(GOYA_RESET_WAIT_MSEC);
+
+ WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
+ ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
+
+ msleep(GOYA_RESET_WAIT_MSEC);
+
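+ /* Zero out the first 256 32-bit words of the TPC SLM and read back once */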
+ for (slm_index = 0 ; slm_index < 256 ; slm_index++)
+ WREG32(tpc_slm_offset + (slm_index << 2), 0);
+
+ val = RREG32(tpc_slm_offset);
+}
+
+static void goya_tpc_mbist_workaround(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i;
+
+ if (hdev->pldm)
+ return;
+
+ if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
+ return;
+
+ /* Workaround for H2 #2443 */
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++)
+ _goya_tpc_mbist_workaround(hdev, i);
+
+ goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
+}
+
+/*
+ * goya_init_golden_registers - Initialize golden registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the H/W registers of the device
+ *
+ */
+static void goya_init_golden_registers(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 polynom[10], tpc_intr_mask, offset;
+ int i;
+
+ if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
+ return;
+
+ polynom[0] = 0x00020080;
+ polynom[1] = 0x00401000;
+ polynom[2] = 0x00200800;
+ polynom[3] = 0x00002000;
+ polynom[4] = 0x00080200;
+ polynom[5] = 0x00040100;
+ polynom[6] = 0x00100400;
+ polynom[7] = 0x00004000;
+ polynom[8] = 0x00010000;
+ polynom[9] = 0x00008000;
+
+ /* Mask all arithmetic interrupts from TPC */
+ tpc_intr_mask = 0x7FFF;
+
+ for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
+
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
+ }
+
+ WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
+ WREG32(mmMME_AGU, 0x0f0f0f10);
+ WREG32(mmMME_SEI_MASK, ~0x0);
+
+ WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
+ WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
+ WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
+ WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
+ WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
+ WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
+ WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
+ WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
+ WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
+ WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
+ WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
+ WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
+ WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
+ WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
+ WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
+ WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
+ WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
+ WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
+ WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
+ WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
+ WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
+ WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
+ WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
+ WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
+ WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
+ WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
+ WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
+ WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
+ WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
+ WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
+ WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
+ WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
+ WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
+ WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
+ WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
+ WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
+ WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
+ WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
+ WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
+ WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
+ WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
+ WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
+ WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
+ WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
+ WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
+ WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
+ WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
+ WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
+ WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
+ WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
+ WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
+ WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
+ WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
+ WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
+ WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
+ WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
+ WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
+ WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
+ WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
+ WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
+ WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
+ WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
+ WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
+ WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
+ WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
+ WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+
+ WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
+ WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
+ WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
+ WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
+ WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
+ WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
+ WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
+ WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
+
+ WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
+ WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
+ WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
+ WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
+ WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
+ WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
+ WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
+ WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
+ WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
+ WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
+ WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
+ WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
+
+ WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
+ WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
+ WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
+ WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
+ WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
+ WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
+ WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
+ WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
+ WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
+ WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
+ WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
+ WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
+
+ WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
+ WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
+ WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
+ WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
+ WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
+ WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
+ WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
+ WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
+ WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
+ WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
+ WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
+ WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
+
+ WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
+ WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
+ WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
+ WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
+ WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
+ WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
+ WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
+ WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
+ WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
+ WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
+ WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
+ WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
+
+ WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
+ WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
+ WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
+ WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
+ WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
+
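+ /*
+ * Program the split coefficients (polynom values shifted right by 7)
+ * into all MME, TPC, PCI and DMA routers
+ */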
+ for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
+ WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+
+ WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+
+ WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ }
+
+ for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
+ WREG32(mmMME1_RTR_SCRAMB_EN + offset,
+ 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
+ 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
+ }
+
+ for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
+ /*
+ * Workaround for Bug H2 #2441:
+ * "ST.NOP set trace event illegal opcode"
+ */
+ WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
+
+ WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
+ 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
+ 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+ }
+
+ WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
+ 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+ WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
+ 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+ /*
+ * Workaround for H2 #HW-23 bug:
+ * Set the maximum outstanding read requests to 240 on DMA CH 1 and to
+ * 16 on the KMD DMA (CH 0). Only these DMAs need to be limited because
+ * the user can only read from Host using DMA CH 1.
+ */
+ WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
+ WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
+
+ goya->hw_cap_initialized |= HW_CAP_GOLDEN;
+}
+
+static void goya_init_mme_qman(struct hl_device *hdev)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u64 qman_base_addr;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ qman_base_addr = hdev->asic_prop.sram_base_address +
+ MME_QMAN_BASE_OFFSET;
+
+ WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
+ WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
+ WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
+ WREG32(mmMME_QM_PQ_PI, 0);
+ WREG32(mmMME_QM_PQ_CI, 0);
+ WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
+ WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
+ WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
+ WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
+
+ WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
+ WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
+ WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
+ WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
+
+ /* QMAN CQ has 8 cache lines */
+ WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
+
+ WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
+ WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
+
+ WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
+
+ WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
+
+ WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
+
+ WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
+}
+
+static void goya_init_mme_cmdq(struct hl_device *hdev)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u64 qman_base_addr;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ qman_base_addr = hdev->asic_prop.sram_base_address +
+ MME_QMAN_BASE_OFFSET;
+
+ WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
+ WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
+ WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
+ WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
+
+ /* CMDQ CQ has 20 cache lines */
+ WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
+
+ WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
+ WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
+
+ WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
+
+ WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
+
+ WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
+
+ WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
+}
+
+static void goya_init_mme_qmans(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 so_base_lo, so_base_hi;
+
+ if (goya->hw_cap_initialized & HW_CAP_MME)
+ return;
+
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
+ WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
+
+ goya_init_mme_qman(hdev);
+ goya_init_mme_cmdq(hdev);
+
+ goya->hw_cap_initialized |= HW_CAP_MME;
+}
+
+static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u64 qman_base_addr;
+ u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
+
+ WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
+ WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
+ WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
+ WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
+ WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
+ WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
+
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
+
+ WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
+
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
+
+ WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
+
+ WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
+
+ WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
+
+ WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
+}
+
+static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
+
+ WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
+
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
+
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
+
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
+
+ WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
+
+ WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
+}
+
+static void goya_init_tpc_qmans(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 so_base_lo, so_base_hi;
+ u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
+ mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
+ int i;
+
+ if (goya->hw_cap_initialized & HW_CAP_TPC)
+ return;
+
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++) {
+ WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
+ so_base_lo);
+ WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
+ so_base_hi);
+ }
+
+ goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
+ goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
+ goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
+ goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
+ goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
+ goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
+ goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
+ goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++)
+ goya_init_tpc_cmdq(hdev, i);
+
+ goya->hw_cap_initialized |= HW_CAP_TPC;
+}
+
+/*
+ * goya_disable_internal_queues - Disable internal queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static void goya_disable_internal_queues(struct hl_device *hdev)
+{
+ WREG32(mmMME_QM_GLBL_CFG0, 0);
+ WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC0_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC1_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC2_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC3_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC4_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC5_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC6_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC7_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
+}
+
+/*
+ * goya_stop_internal_queues - Stop internal queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_stop_internal_queues(struct hl_device *hdev)
+{
+ int rc, retval = 0;
+
+ /*
+ * Each queue (QMAN) is a separate H/W logic. That means that each
+ * QMAN can be stopped independently, and failure to stop one does not
+ * prevent us from trying to stop the others
+ */
+
+ rc = goya_stop_queue(hdev,
+ mmMME_QM_GLBL_CFG1,
+ mmMME_QM_CP_STS,
+ mmMME_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop MME QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmMME_CMDQ_GLBL_CFG1,
+ mmMME_CMDQ_CP_STS,
+ mmMME_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop MME CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC0_QM_GLBL_CFG1,
+ mmTPC0_QM_CP_STS,
+ mmTPC0_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC0_CMDQ_GLBL_CFG1,
+ mmTPC0_CMDQ_CP_STS,
+ mmTPC0_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC1_QM_GLBL_CFG1,
+ mmTPC1_QM_CP_STS,
+ mmTPC1_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC1_CMDQ_GLBL_CFG1,
+ mmTPC1_CMDQ_CP_STS,
+ mmTPC1_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC2_QM_GLBL_CFG1,
+ mmTPC2_QM_CP_STS,
+ mmTPC2_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC2_CMDQ_GLBL_CFG1,
+ mmTPC2_CMDQ_CP_STS,
+ mmTPC2_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC3_QM_GLBL_CFG1,
+ mmTPC3_QM_CP_STS,
+ mmTPC3_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC3_CMDQ_GLBL_CFG1,
+ mmTPC3_CMDQ_CP_STS,
+ mmTPC3_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC4_QM_GLBL_CFG1,
+ mmTPC4_QM_CP_STS,
+ mmTPC4_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC4_CMDQ_GLBL_CFG1,
+ mmTPC4_CMDQ_CP_STS,
+ mmTPC4_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC5_QM_GLBL_CFG1,
+ mmTPC5_QM_CP_STS,
+ mmTPC5_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC5_CMDQ_GLBL_CFG1,
+ mmTPC5_CMDQ_CP_STS,
+ mmTPC5_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC6_QM_GLBL_CFG1,
+ mmTPC6_QM_CP_STS,
+ mmTPC6_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC6_CMDQ_GLBL_CFG1,
+ mmTPC6_CMDQ_CP_STS,
+ mmTPC6_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC7_QM_GLBL_CFG1,
+ mmTPC7_QM_CP_STS,
+ mmTPC7_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC7_CMDQ_GLBL_CFG1,
+ mmTPC7_CMDQ_CP_STS,
+ mmTPC7_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
+ retval = -EIO;
+ }
+
+ return retval;
+}
+
+static void goya_resume_internal_queues(struct hl_device *hdev)
+{
+ WREG32(mmMME_QM_GLBL_CFG1, 0);
+ WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC0_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC1_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC2_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC3_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC4_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC5_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC6_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC7_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
+}
+
+static void goya_dma_stall(struct hl_device *hdev)
+{
+ WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
+}
+
+static void goya_tpc_stall(struct hl_device *hdev)
+{
+ WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
+}
+
+static void goya_mme_stall(struct hl_device *hdev)
+{
+ WREG32(mmMME_STALL, 0xFFFFFFFF);
+}
+
+static int goya_enable_msix(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int cq_cnt = hdev->asic_prop.completion_queues_count;
+ int rc, i, irq_cnt_init, irq;
+
+ if (goya->hw_cap_initialized & HW_CAP_MSIX)
+ return 0;
+
+ rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
+ GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+ if (rc < 0) {
+ dev_err(hdev->dev,
+ "MSI-X: Failed to enable support -- %d/%d\n",
+ GOYA_MSIX_ENTRIES, rc);
+ return rc;
+ }
+
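+ /* Request one interrupt per completion queue; a dedicated MSI-X entry is used for the event queue */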
+ for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
+ irq = pci_irq_vector(hdev->pdev, i);
+ rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
+ &hdev->completion_queue[i]);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_irqs;
+ }
+ }
+
+ irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+
+ rc = request_irq(irq, hl_irq_handler_eq, 0,
+ goya_irq_name[EVENT_QUEUE_MSIX_IDX],
+ &hdev->event_queue);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_irqs;
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_MSIX;
+ return 0;
+
+free_irqs:
+ for (i = 0 ; i < irq_cnt_init ; i++)
+ free_irq(pci_irq_vector(hdev->pdev, i),
+ &hdev->completion_queue[i]);
+
+ pci_free_irq_vectors(hdev->pdev);
+ return rc;
+}
+
+static void goya_sync_irqs(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
+ return;
+
+ /* Wait for all pending IRQ handlers to finish */
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ synchronize_irq(pci_irq_vector(hdev->pdev, i));
+
+ synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
+}
+
+static void goya_disable_msix(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i, irq;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
+ return;
+
+ goya_sync_irqs(hdev);
+
+ irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+ free_irq(irq, &hdev->event_queue);
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
+ irq = pci_irq_vector(hdev->pdev, i);
+ free_irq(irq, &hdev->completion_queue[i]);
+ }
+
+ pci_free_irq_vectors(hdev->pdev);
+
+ goya->hw_cap_initialized &= ~HW_CAP_MSIX;
+}
+
+static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
+{
+ u32 wait_timeout_ms, cpu_timeout_ms;
+
+ dev_info(hdev->dev,
+ "Halting compute engines and disabling interrupts\n");
+
+ if (hdev->pldm) {
+ wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+ } else {
+ wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
+ }
+
+ if (hard_reset) {
+ /*
+ * The state of the CPU is unknown at this point, so make sure it is
+ * stopped by any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
+ msleep(cpu_timeout_ms);
+ }
+
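+ /*
+ * Stop the queues first, then stall the engines, and only then
+ * disable the queues and the interrupts
+ */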
+ goya_stop_external_queues(hdev);
+ goya_stop_internal_queues(hdev);
+
+ msleep(wait_timeout_ms);
+
+ goya_dma_stall(hdev);
+ goya_tpc_stall(hdev);
+ goya_mme_stall(hdev);
+
+ msleep(wait_timeout_ms);
+
+ goya_disable_external_queues(hdev);
+ goya_disable_internal_queues(hdev);
+
+ if (hard_reset)
+ goya_disable_msix(hdev);
+ else
+ goya_sync_irqs(hdev);
+}
+
+/*
+ * goya_push_fw_to_device - Push FW code to device
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Copy fw code from firmware file to device memory.
+ * Returns 0 on success
+ *
+ */
+static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+ void __iomem *dst)
+{
+ const struct firmware *fw;
+ const u64 *fw_data;
+ size_t fw_size, i;
+ int rc;
+
+ rc = request_firmware(&fw, fw_name, hdev->dev);
+
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request %s\n", fw_name);
+ goto out;
+ }
+
+ fw_size = fw->size;
+ if ((fw_size % 4) != 0) {
+ dev_err(hdev->dev, "illegal %s firmware size %zu\n",
+ fw_name, fw_size);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
+
+ fw_data = (const u64 *) fw->data;
+
+ if ((fw->size % 8) != 0)
+ fw_size -= 8;
+
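+ /*
+ * Copy the image in 64-bit chunks, yielding briefly every 512KB.
+ * A trailing 4-byte remainder, if any, is written separately below.
+ */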
+ for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
+ if (!(i & (0x80000 - 1))) {
+ dev_dbg(hdev->dev,
+ "copied so far %zu out of %zu for %s firmware",
+ i, fw_size, fw_name);
+ usleep_range(20, 100);
+ }
+
+ writeq(*fw_data, dst);
+ }
+
+ if ((fw->size % 8) != 0)
+ writel(*(const u32 *) fw_data, dst);
+
+out:
+ release_firmware(fw);
+ return rc;
+}
+
+static int goya_pldm_init_cpu(struct hl_device *hdev)
+{
+ char fw_name[200];
+ void __iomem *dst;
+ u32 val, unit_rst_val;
+ int rc;
+
+ /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
+ goya_init_golden_registers(hdev);
+
+ /* Put ARM cores into reset */
+ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
+ val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+
+ /* Reset the CA53 MACRO */
+ unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+ WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
+ val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+ WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
+ val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+
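+ /*
+ * Push u-boot to SRAM and the Linux FIT image to DRAM, then point
+ * the CPU reset address at the u-boot entry and release core 0
+ */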
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
+ dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
+ rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ if (rc)
+ return rc;
+
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
+ dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
+ rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ if (rc)
+ return rc;
+
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
+ WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
+
+ WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
+ lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
+ WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
+ upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
+
+ /* Release ARM core 0 from reset */
+ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
+ CPU_RESET_CORE0_DEASSERT);
+ val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+
+ return 0;
+}
+
+/*
+ * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
+ * The version string is located at that offset.
+ */
+static void goya_read_device_fw_version(struct hl_device *hdev,
+ enum goya_fw_component fwc)
+{
+ const char *name;
+ u32 ver_off;
+ char *dest;
+
+ switch (fwc) {
+ case FW_COMP_UBOOT:
+ ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
+ dest = hdev->asic_prop.uboot_ver;
+ name = "U-Boot";
+ break;
+ case FW_COMP_PREBOOT:
+ ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
+ dest = hdev->asic_prop.preboot_ver;
+ name = "Preboot";
+ break;
+ default:
+ dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
+ return;
+ }
+
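+ /* The scratchpad holds an SRAM address; strip the SRAM base to get the offset */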
+ ver_off &= ~((u32)SRAM_BASE_ADDR);
+
+ if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
+ memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
+ VERSION_MAX_LEN);
+ } else {
+ dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
+ name, ver_off);
+ strcpy(dest, "unavailable");
+ }
+}
+
+static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ char fw_name[200];
+ void __iomem *dst;
+ u32 status;
+ int rc;
+
+ if (!hdev->cpu_enable)
+ return 0;
+
+ if (goya->hw_cap_initialized & HW_CAP_CPU)
+ return 0;
+
+ /*
+ * Before pushing u-boot/Linux to the device, the DDR bar must point
+ * to the DRAM base address
+ */
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to map DDR bar to DRAM base address\n");
+ return rc;
+ }
+
+ if (hdev->pldm) {
+ rc = goya_pldm_init_cpu(hdev);
+ if (rc)
+ return rc;
+
+ goto out;
+ }
+
+ /* Make sure CPU boot-loader is running */
+ rc = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_WARM_REBOOT,
+ status,
+ (status == CPU_BOOT_STATUS_DRAM_RDY) ||
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ if (rc) {
+ dev_err(hdev->dev, "Error in ARM u-boot!");
+ switch (status) {
+ case CPU_BOOT_STATUS_NA:
+ dev_err(hdev->dev,
+ "ARM status %d - BTL did NOT run\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_WFE:
+ dev_err(hdev->dev,
+ "ARM status %d - Inside WFE loop\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_BTL:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in BTL\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_PREBOOT:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in Preboot\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_SPL:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in SPL\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_UBOOT:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in u-boot\n", status);
+ break;
+ case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
+ dev_err(hdev->dev,
+ "ARM status %d - DDR initialization failed\n",
+ status);
+ break;
+ default:
+ dev_err(hdev->dev,
+ "ARM status %d - Invalid status code\n",
+ status);
+ break;
+ }
+ return -EIO;
+ }
+
+ /* Read U-Boot version now in case we fail later */
+ goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
+ goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
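+ /* If SRAM is already available, the device CPU has finished booting */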
+ if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
+ goto out;
+
+ if (!hdev->fw_loading) {
+ dev_info(hdev->dev, "Skip loading FW\n");
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
+ dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
+ rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ if (rc)
+ return rc;
+
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_WARM_REBOOT,
+ status,
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ if (rc) {
+ if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
+ dev_err(hdev->dev,
+ "ARM u-boot reports FIT image is corrupted\n");
+ else
+ dev_err(hdev->dev,
+ "ARM Linux failed to load, %d\n", status);
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
+ return -EIO;
+ }
+
+ dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+
+out:
+ goya->hw_cap_initialized |= HW_CAP_CPU;
+
+ return 0;
+}
+
+static int goya_mmu_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ u64 hop0_addr;
+ int rc, i;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
+ return 0;
+
+ hdev->dram_supports_virtual_memory = true;
+ hdev->dram_default_page_mapping = true;
+
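+ /* Set the hop0 page-table address for every possible ASID */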
+ for (i = 0 ; i < prop->max_asid ; i++) {
+ hop0_addr = prop->mmu_pgt_addr +
+ (i * prop->mmu_hop_table_size);
+
+ rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to set hop0 addr for asid %d\n", i);
+ goto err;
+ }
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_MMU;
+
+ /* init MMU cache manage page */
+ WREG32(mmSTLB_CACHE_INV_BASE_39_8,
+ lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
+ WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+
+ /* Remove follower feature due to performance bug */
+ WREG32_AND(mmSTLB_STLB_FEATURE_EN,
+ (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+
+ WREG32(mmMMU_MMU_ENABLE, 1);
+ WREG32(mmMMU_SPI_MASK, 0xF);
+
+ return 0;
+
+err:
+ return rc;
+}
+
+/*
+ * goya_hw_init - Goya hardware initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_hw_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 val;
+ int rc;
+
+ dev_info(hdev->dev, "Starting initialization of H/W\n");
+
+ /* Perform read from the device to make sure device is up */
+ val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ /*
+ * Let's mark in the H/W that we have reached this point. We check
+ * this value in the reset_before_init function to understand whether
+ * we need to reset the chip before doing H/W init. This register is
+ * cleared by the H/W upon H/W reset
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
+
+ rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU\n");
+ return rc;
+ }
+
+ goya_tpc_mbist_workaround(hdev);
+
+ goya_init_golden_registers(hdev);
+
+ /*
+ * After CPU initialization is finished, change DDR bar mapping inside
+ * iATU to point to the start address of the MMU page tables
+ */
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
+ (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to map DDR bar to MMU page tables\n");
+ return rc;
+ }
+
+ rc = goya_mmu_init(hdev);
+ if (rc)
+ return rc;
+
+ goya_init_security(hdev);
+
+ goya_init_dma_qmans(hdev);
+
+ goya_init_mme_qmans(hdev);
+
+ goya_init_tpc_qmans(hdev);
+
+ /* MSI-X must be enabled before CPU queues are initialized */
+ rc = goya_enable_msix(hdev);
+ if (rc)
+ goto disable_queues;
+
+ rc = goya_init_cpu_queues(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
+ rc);
+ goto disable_msix;
+ }
+
+ /* CPU initialization is finished, we can now move to 48 bit DMA mask */
+ rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
+ if (rc) {
+ dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
+ rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci dma mask to 32 bits\n");
+ goto disable_pci_access;
+ }
+ }
+
+ rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
+ if (rc) {
+ dev_warn(hdev->dev,
+ "Unable to set pci consistent dma mask to 48 bits\n");
+ rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci consistent dma mask to 32 bits\n");
+ goto disable_pci_access;
+ }
+ }
+
+ /* Perform read from the device to flush all MSI-X configuration */
+ val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ return 0;
+
+disable_pci_access:
+ goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+disable_msix:
+ goya_disable_msix(hdev);
+disable_queues:
+ goya_disable_internal_queues(hdev);
+ goya_disable_external_queues(hdev);
+
+ return rc;
+}
+
+/*
+ * goya_hw_fini - Goya hardware tear-down code
+ *
+ * @hdev: pointer to hl_device structure
+ * @hard_reset: should we do hard reset to all engines or just reset the
+ * compute/dma engines
+ */
+static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 reset_timeout_ms, status;
+
+ if (hdev->pldm)
+ reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
+ else
+ reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
+
+ if (hard_reset) {
+ goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
+ goya_disable_clk_rlx(hdev);
+ goya_set_pll_refclk(hdev);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
+ dev_info(hdev->dev,
+ "Issued SOFT reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ }
+
+ /*
+ * After hard reset, we can't poll the BTM_FSM register because the PSOC
+ * itself is in reset. In either case we need to wait until the reset
+ * is deasserted
+ */
+ msleep(reset_timeout_ms);
+
+ status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
+ dev_err(hdev->dev,
+ "Timeout while waiting for device to reset 0x%x\n",
+ status);
+
+ if (!hard_reset) {
+ goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
+ HW_CAP_GOLDEN | HW_CAP_TPC);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_SOFT_RESET);
+ return;
+ }
+
+ /* Chicken bit to re-initiate boot sequencer flow */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
+ 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
+ /* Move boot manager FSM to pre boot sequencer init state */
+ WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
+ 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
+
+ goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
+ HW_CAP_DDR_0 | HW_CAP_DDR_1 |
+ HW_CAP_DMA | HW_CAP_MME |
+ HW_CAP_MMU | HW_CAP_TPC_MBIST |
+ HW_CAP_GOLDEN | HW_CAP_TPC);
+ memset(goya->events_stat, 0, sizeof(goya->events_stat));
+
+ if (!hdev->pldm) {
+ int rc;
+ /* In case we are running inside a VM and the VM is
+ * shutting down, we need to make sure CPU boot-loader
+ * is running before we can continue the VM shutdown.
+ * That is because the VM will send an FLR signal that
+ * we must answer
+ */
+ dev_info(hdev->dev,
+ "Going to wait up to %ds for CPU boot loader\n",
+ GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_WARM_REBOOT,
+ status,
+ (status == CPU_BOOT_STATUS_DRAM_RDY),
+ 10000,
+ GOYA_CPU_TIMEOUT_USEC);
+ if (rc)
+ dev_err(hdev->dev,
+ "failed to wait for CPU boot loader\n");
+ }
+}
+
+int goya_suspend(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = goya_stop_internal_queues(hdev);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop internal queues\n");
+ return rc;
+ }
+
+ rc = goya_stop_external_queues(hdev);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop external queues\n");
+ return rc;
+ }
+
+ rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ if (rc)
+ dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
+
+ return rc;
+}
+
+int goya_resume(struct hl_device *hdev)
+{
+ int rc;
+
+ goya_resume_external_queues(hdev);
+ goya_resume_internal_queues(hdev);
+
+ rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ if (rc)
+ dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ return rc;
+}
+
+static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ u64 kaddress, phys_addr_t paddress, u32 size)
+{
+ int rc;
+
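+ /*
+ * Map the CB to user-space. Mark the VMA as an I/O pfn mapping so it is
+ * never copied, expanded or included in core dumps
+ */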
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE;
+
+ rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+ return rc;
+}
+
+static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
+{
+ u32 db_reg_offset, db_value;
+ bool invalid_queue = false;
+
+ switch (hw_queue_id) {
+ case GOYA_QUEUE_ID_DMA_0:
+ db_reg_offset = mmDMA_QM_0_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_1:
+ db_reg_offset = mmDMA_QM_1_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_2:
+ db_reg_offset = mmDMA_QM_2_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_3:
+ db_reg_offset = mmDMA_QM_3_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_4:
+ db_reg_offset = mmDMA_QM_4_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_CPU_PQ:
+ if (hdev->cpu_queues_enable)
+ db_reg_offset = mmCPU_IF_PF_PQ_PI;
+ else
+ invalid_queue = true;
+ break;
+
+ case GOYA_QUEUE_ID_MME:
+ db_reg_offset = mmMME_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC0:
+ db_reg_offset = mmTPC0_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC1:
+ db_reg_offset = mmTPC1_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC2:
+ db_reg_offset = mmTPC2_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC3:
+ db_reg_offset = mmTPC3_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC4:
+ db_reg_offset = mmTPC4_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC5:
+ db_reg_offset = mmTPC5_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC6:
+ db_reg_offset = mmTPC6_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC7:
+ db_reg_offset = mmTPC7_QM_PQ_PI;
+ break;
+
+ default:
+ invalid_queue = true;
+ }
+
+ if (invalid_queue) {
+ /* Should never get here */
+ dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
+ hw_queue_id);
+ return;
+ }
+
+ db_value = pi;
+
+ /* ring the doorbell */
+ WREG32(db_reg_offset, db_value);
+
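+ /* For the CPU queue, also interrupt the device CPU so it sees the new PI */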
+ if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_PI_UPDATE);
+}
+
+void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
+{
+ /* Not needed in Goya */
+}
+
+static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
+}
+
+static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
+}
+
+void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
+ dma_addr_t *dma_handle, u16 *queue_len)
+{
+ void *base;
+ u32 offset;
+
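+ /*
+ * Internal queues reside in SRAM. Return both the host kernel mapping of
+ * the queue and its device-visible SRAM address
+ */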
+ *dma_handle = hdev->asic_prop.sram_base_address;
+
+ base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+
+ switch (queue_id) {
+ case GOYA_QUEUE_ID_MME:
+ offset = MME_QMAN_BASE_OFFSET;
+ *queue_len = MME_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC0:
+ offset = TPC0_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC1:
+ offset = TPC1_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC2:
+ offset = TPC2_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC3:
+ offset = TPC3_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC4:
+ offset = TPC4_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC5:
+ offset = TPC5_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC6:
+ offset = TPC6_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC7:
+ offset = TPC7_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ default:
+ dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
+ return NULL;
+ }
+
+ base += offset;
+ *dma_handle += offset;
+
+ return base;
+}
+
+static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct packet_msg_prot *fence_pkt;
+ u32 *fence_ptr;
+ dma_addr_t fence_dma_addr;
+ struct hl_cb *cb;
+ u32 tmp, timeout;
+ int rc;
+
+ if (hdev->pldm)
+ timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
+ else
+ timeout = HL_DEVICE_TIMEOUT_USEC;
+
+ if (!hdev->asic_funcs->is_device_idle(hdev)) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't send KMD job on QMAN0 if device is not idle\n");
+ return -EBUSY;
+ }
+
+ fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate fence memory for QMAN0\n");
+ return -ENOMEM;
+ }
+
+ *fence_ptr = 0;
+
+ if (goya->hw_cap_initialized & HW_CAP_MMU) {
+ WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
+ RREG32(mmDMA_QM_0_GLBL_PROT);
+ }
+
+ /*
+ * goya cs parser saves space for 2xpacket_msg_prot at end of CB. For
+ * synchronized kernel jobs we only need space for 1 packet_msg_prot
+ */
+ job->job_cb_size -= sizeof(struct packet_msg_prot);
+
+ cb = job->patched_cb;
+
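+ /*
+ * Use the space reserved at the end of the CB for a MSG_PROT packet that
+ * writes the fence value to host memory when the job completes
+ */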
+ fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
+ job->job_cb_size - sizeof(struct packet_msg_prot));
+
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_EB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr +
+ hdev->asic_prop.host_phys_base_address);
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
+ job->job_cb_size, cb->bus_address);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
+ goto free_fence_ptr;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
+ &tmp);
+
+ hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
+
+ if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
+ dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
+ rc = -ETIMEDOUT;
+ }
+
+free_fence_ptr:
+ hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+
+ if (goya->hw_cap_initialized & HW_CAP_MMU) {
+ WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
+ RREG32(mmDMA_QM_0_GLBL_PROT);
+ }
+
+ return rc;
+}
+
+int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
+ u32 timeout, long *result)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct armcp_packet *pkt;
+ dma_addr_t pkt_dma_addr;
+ u32 tmp;
+ int rc = 0;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
+ if (result)
+ *result = 0;
+ return 0;
+ }
+
+ if (len > CPU_CB_SIZE) {
+ dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
+ len);
+ return -ENOMEM;
+ }
+
+ pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
+ &pkt_dma_addr);
+ if (!pkt) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for packet to CPU\n");
+ return -ENOMEM;
+ }
+
+ memcpy(pkt, msg, len);
+
+ mutex_lock(&hdev->send_cpu_message_lock);
+
+ if (hdev->disabled)
+ goto out;
+
+ if (hdev->device_cpu_disabled) {
+ rc = -EIO;
+ goto out;
+ }
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
+ pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
+ goto out;
+ }
+
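+ /* Wait for the device CPU to write the fence value into the packet */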
+ rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
+ timeout, &tmp);
+
+ hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
+ hdev->device_cpu_disabled = true;
+ goto out;
+ }
+
+ if (tmp == ARMCP_PACKET_FENCE_VAL) {
+ u32 ctl = le32_to_cpu(pkt->ctl);
+
+ rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
+ if (rc) {
+ dev_err(hdev->dev,
+ "F/W ERROR %d for CPU packet %d\n",
+ rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
+ >> ARMCP_PKT_CTL_OPCODE_SHIFT);
+ rc = -EINVAL;
+ } else if (result) {
+ *result = (long) le64_to_cpu(pkt->result);
+ }
+ } else {
+ dev_err(hdev->dev, "CPU packet wrong fence value\n");
+ rc = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
+
+ return rc;
+}
+
+int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
+{
+ struct packet_msg_prot *fence_pkt;
+ dma_addr_t pkt_dma_addr;
+ u32 fence_val, tmp;
+ dma_addr_t fence_dma_addr;
+ u32 *fence_ptr;
+ int rc;
+
+ fence_val = GOYA_QMAN0_FENCE_VAL;
+
+ fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate memory for queue testing\n");
+ return -ENOMEM;
+ }
+
+ *fence_ptr = 0;
+
+ fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
+ sizeof(struct packet_msg_prot),
+ GFP_KERNEL, &pkt_dma_addr);
+ if (!fence_pkt) {
+ dev_err(hdev->dev,
+ "Failed to allocate packet for queue testing\n");
+ rc = -ENOMEM;
+ goto free_fence_ptr;
+ }
+
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_EB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(fence_val);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr +
+ hdev->asic_prop.host_phys_base_address);
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
+ sizeof(struct packet_msg_prot),
+ pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send fence packet\n");
+ goto free_pkt;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
+ GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
+
+ hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
+
+ if ((!rc) && (tmp == fence_val)) {
+ dev_info(hdev->dev,
+ "queue test on H/W queue %d succeeded\n",
+ hw_queue_id);
+ } else {
+ dev_err(hdev->dev,
+ "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
+ hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
+ rc = -EINVAL;
+ }
+
+free_pkt:
+ hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
+ pkt_dma_addr);
+free_fence_ptr:
+ hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+ return rc;
+}
+
+int goya_test_cpu_queue(struct hl_device *hdev)
+{
+ struct armcp_packet test_pkt;
+ long result;
+ int rc;
+
+ /* cpu_queues_enable flag is always checked in send cpu message */
+
+ memset(&test_pkt, 0, sizeof(test_pkt));
+
+ test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
+ sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (!rc) {
+ if (result == ARMCP_PACKET_FENCE_VAL)
+ dev_info(hdev->dev,
+ "queue test on CPU queue succeeded\n");
+ else
+ dev_err(hdev->dev,
+ "CPU queue test failed (0x%08lX)\n", result);
+ } else {
+ dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
+ }
+
+ return rc;
+}
+
+static int goya_test_queues(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i, rc, ret_val = 0;
+
+ for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
+ rc = goya_test_queue(hdev, i);
+ if (rc)
+ ret_val = -EINVAL;
+ }
+
+ if (hdev->cpu_queues_enable) {
+ rc = goya->test_cpu_queue(hdev);
+ if (rc)
+ ret_val = -EINVAL;
+ }
+
+ return ret_val;
+}
+
+static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma_handle)
+{
+ if (size > GOYA_DMA_POOL_BLK_SIZE)
+ return NULL;
+
+ return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
+}
+
+static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
+ dma_addr_t dma_addr)
+{
+ dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
+}
+
+static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
+ size_t size, dma_addr_t *dma_handle)
+{
+ u64 kernel_addr;
+
+ /* roundup to CPU_PKT_SIZE */
+ size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
+
+ kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
+
+ *dma_handle = hdev->cpu_accessible_dma_address +
+ (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
+
+ return (void *) (uintptr_t) kernel_addr;
+}
+
+static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
+ size_t size, void *vaddr)
+{
+ /* roundup to CPU_PKT_SIZE */
+ size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
+
+ gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
+ size);
+}
+
+static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
+}
+
+u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
+{
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt;
+ u64 len, len_next;
+ dma_addr_t addr, addr_next;
+
+ dma_desc_cnt = 0;
+
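+ /*
+ * Merge physically contiguous SG entries (up to DMA_MAX_TRANSFER_SIZE)
+ * to count how many LIN_DMA packets the patched CB will need
+ */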
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+
+ len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((addr + len == addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ dma_desc_cnt++;
+ }
+
+ return dma_desc_cnt * sizeof(struct packet_lin_dma);
+}
+
+static int goya_pin_memory_before_cs(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ u64 addr, enum dma_data_direction dir)
+{
+ struct hl_userptr *userptr;
+ int rc;
+
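+ /*
+ * If this host buffer was already pinned by a previous packet in the CS,
+ * reuse it. Otherwise, pin it and DMA-map its SG table
+ */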
+ if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr))
+ goto already_pinned;
+
+ userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+ if (!userptr)
+ return -ENOMEM;
+
+ rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ userptr);
+ if (rc)
+ goto free_userptr;
+
+ list_add_tail(&userptr->job_node, parser->job_userptr_list);
+
+ rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents, dir);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto unpin_memory;
+ }
+
+ userptr->dma_mapped = true;
+ userptr->dir = dir;
+
+already_pinned:
+ parser->patched_cb_size +=
+ goya_get_dma_desc_list_size(hdev, userptr->sgt);
+
+ return 0;
+
+unpin_memory:
+ hl_unpin_host_memory(hdev, userptr);
+free_userptr:
+ kfree(userptr);
+ return rc;
+}
+
+static int goya_validate_dma_pkt_host(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ u64 device_memory_addr, addr;
+ enum dma_data_direction dir;
+ enum goya_dma_direction user_dir;
+ bool sram_addr = true;
+ bool skip_host_mem_pin = false;
+ bool user_memset;
+ u32 ctl;
+ int rc = 0;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ switch (user_dir) {
+ case DMA_HOST_TO_DRAM:
+ dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
+ dir = DMA_TO_DEVICE;
+ sram_addr = false;
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ if (user_memset)
+ skip_host_mem_pin = true;
+ break;
+
+ case DMA_DRAM_TO_HOST:
+ dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
+ dir = DMA_FROM_DEVICE;
+ sram_addr = false;
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ break;
+
+ case DMA_HOST_TO_SRAM:
+ dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
+ dir = DMA_TO_DEVICE;
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ if (user_memset)
+ skip_host_mem_pin = true;
+ break;
+
+ case DMA_SRAM_TO_HOST:
+ dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
+ dir = DMA_FROM_DEVICE;
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ break;
+ default:
+ dev_err(hdev->dev, "DMA direction is undefined\n");
+ return -EFAULT;
+ }
+
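+ /*
+ * For user contexts, the device-side address range must fall entirely
+ * inside the user-accessible SRAM or DRAM region
+ */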
+ if (parser->ctx_id != HL_KERNEL_ASID_ID) {
+ if (sram_addr) {
+ if (!hl_mem_area_inside_range(device_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.sram_user_base_address,
+ hdev->asic_prop.sram_end_address)) {
+
+ dev_err(hdev->dev,
+ "SRAM address 0x%llx + 0x%x is invalid\n",
+ device_memory_addr,
+ user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+ } else {
+ if (!hl_mem_area_inside_range(device_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.dram_user_base_address,
+ hdev->asic_prop.dram_end_address)) {
+
+ dev_err(hdev->dev,
+ "DRAM address 0x%llx + 0x%x is invalid\n",
+ device_memory_addr,
+ user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+ }
+ }
+
+ if (skip_host_mem_pin)
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+ else {
+ if ((dir == DMA_TO_DEVICE) &&
+ (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
+ dev_err(hdev->dev,
+ "Can't DMA from host on queue other then 1\n");
+ return -EFAULT;
+ }
+
+ rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
+ addr, dir);
+ }
+
+ return rc;
+}
+
+static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ u64 sram_memory_addr, dram_memory_addr;
+ enum goya_dma_direction user_dir;
+ u32 ctl;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ if (user_dir == DMA_DRAM_TO_SRAM) {
+ dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
+ dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ } else {
+ dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
+ sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ }
+
+ if (!hl_mem_area_inside_range(sram_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.sram_user_base_address,
+ hdev->asic_prop.sram_end_address)) {
+ dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
+ sram_memory_addr, user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+
+ if (!hl_mem_area_inside_range(dram_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.dram_user_base_address,
+ hdev->asic_prop.dram_end_address)) {
+ dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
+ dram_memory_addr, user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+
+ return 0;
+}
+
+static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ enum goya_dma_direction user_dir;
+ u32 ctl;
+ int rc;
+
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+ dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
+ dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
+ dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ /*
+ * Special handling for DMA with size 0. The H/W has a bug where
+ * this can cause the QMAN DMA to get stuck, so block it here.
+ */
+ if (user_dma_pkt->tsize == 0) {
+ dev_err(hdev->dev,
+ "Got DMA with size 0, might reset the device\n");
+ return -EINVAL;
+ }
+
+ if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
+ rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
+ else
+ rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
+
+ return rc;
+}
+
+static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+ dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
+ dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
+ dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+
+ /*
+ * WA for HW-23.
+ * We can't allow user to read from Host using QMANs other than 1.
+ */
+ if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+ hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.va_space_host_start_address,
+ hdev->asic_prop.va_space_host_end_address)) {
+ dev_err(hdev->dev,
+ "Can't DMA from host on queue other then 1\n");
+ return -EFAULT;
+ }
+
+ if (user_dma_pkt->tsize == 0) {
+ dev_err(hdev->dev,
+ "Got DMA with size 0, might reset the device\n");
+ return -EINVAL;
+ }
+
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+
+ return 0;
+}
+
+static int goya_validate_wreg32(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_wreg32 *wreg_pkt)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 sob_start_addr, sob_end_addr;
+ u16 reg_offset;
+
+ reg_offset = le32_to_cpu(wreg_pkt->ctl) &
+ GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
+
+ dev_dbg(hdev->dev, "WREG32 packet details:\n");
+ dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
+ dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+
+ if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
+ dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
+ reg_offset);
+ return -EPERM;
+ }
+
+ /*
+ * With MMU, DMA channels are not secured, so it doesn't matter where
+ * the WR COMP is written, because it goes out with the non-secured
+ * property
+ */
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
+ return 0;
+
+ sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
+
+ if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
+ (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
+
+ dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
+ wreg_pkt->value);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int goya_validate_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser, bool is_mmu)
+{
+ u32 cb_parsed_length = 0;
+ int rc = 0;
+
+ parser->patched_cb_size = 0;
+
+ /* user_cb_size is more than 0 so the loop will always be executed */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ void *user_pkt;
+
+ user_pkt = (void *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+
+ pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = goya_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_WREG_32:
+ /*
+ * Although it is validated after copy in patch_cb(),
+ * we need to validate here as well because patch_cb() is
+ * not called in the MMU path while this function is
+ */
+ rc = goya_validate_wreg32(hdev, parser, user_pkt);
+ break;
+
+ case PACKET_WREG_BULK:
+ dev_err(hdev->dev,
+ "User not allowed to use WREG_BULK\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_LIN_DMA:
+ if (is_mmu)
+ rc = goya_validate_dma_pkt_mmu(hdev, parser,
+ user_pkt);
+ else
+ rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
+ user_pkt);
+ break;
+
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ parser->patched_cb_size += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ /*
+ * The new CB should have space at the end for two MSG_PROT packets:
+ * 1. A packet that will act as a completion packet
+ * 2. A packet that will generate MSI-X interrupt
+ */
+ parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
+
+ return rc;
+}
+
+static int goya_patch_dma_packet(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ struct packet_lin_dma *new_dma_pkt,
+ u32 *new_dma_pkt_size)
+{
+ struct hl_userptr *userptr;
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt;
+ u64 len, len_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ enum goya_dma_direction user_dir;
+ u64 device_memory_addr, addr;
+ enum dma_data_direction dir;
+ struct sg_table *sgt;
+ bool skip_host_mem_pin = false;
+ bool user_memset;
+ u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
+ (user_dma_pkt->tsize == 0)) {
+ memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
+ *new_dma_pkt_size = sizeof(*new_dma_pkt);
+ return 0;
+ }
+
+ if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ dir = DMA_TO_DEVICE;
+ if (user_memset)
+ skip_host_mem_pin = true;
+ } else {
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ dir = DMA_FROM_DEVICE;
+ }
+
+ if ((!skip_host_mem_pin) &&
+ (hl_userptr_is_pinned(hdev, addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr) == false)) {
+ dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
+ addr, user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+
+ if ((user_memset) && (dir == DMA_TO_DEVICE)) {
+ memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
+ *new_dma_pkt_size = sizeof(*user_dma_pkt);
+ return 0;
+ }
+
+ user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
+
+ user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
+
+ sgt = userptr->sgt;
+ dma_desc_cnt = 0;
+
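+ /*
+ * Split the user's single LIN_DMA packet into one packet per (merged)
+ * SG entry of the pinned host memory
+ */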
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg_dma_len(sg);
+ dma_addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ dma_addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((dma_addr + len == dma_addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ if (likely(dma_desc_cnt))
+ ctl &= ~GOYA_PKT_CTL_EB_MASK;
+ ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
+ GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
+ new_dma_pkt->ctl = cpu_to_le32(ctl);
+ new_dma_pkt->tsize = cpu_to_le32((u32) len);
+
+ dma_addr += hdev->asic_prop.host_phys_base_address;
+
+ if (dir == DMA_TO_DEVICE) {
+ new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
+ } else {
+ new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
+ }
+
+ if (!user_memset)
+ device_memory_addr += len;
+ dma_desc_cnt++;
+ new_dma_pkt++;
+ }
+
+ if (!dma_desc_cnt) {
+ dev_err(hdev->dev,
+ "Error of 0 SG entries when patching DMA packet\n");
+ return -EFAULT;
+ }
+
+ /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
+ new_dma_pkt--;
+ new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
+
+ *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
+
+ return 0;
+}
+
+static int goya_patch_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u32 cb_parsed_length = 0;
+ u32 cb_patched_cur_length = 0;
+ int rc = 0;
+
+ /* user_cb_size is more than 0 so the loop will always be executed */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ u32 new_pkt_size = 0;
+ void *user_pkt, *kernel_pkt;
+
+ user_pkt = (void *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+ kernel_pkt = (void *) (uintptr_t)
+ (parser->patched_cb->kernel_address +
+ cb_patched_cur_length);
+
+ pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = goya_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_LIN_DMA:
+ rc = goya_patch_dma_packet(hdev, parser, user_pkt,
+ kernel_pkt, &new_pkt_size);
+ cb_patched_cur_length += new_pkt_size;
+ break;
+
+ case PACKET_WREG_32:
+ memcpy(kernel_pkt, user_pkt, pkt_size);
+ cb_patched_cur_length += pkt_size;
+ rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
+ break;
+
+ case PACKET_WREG_BULK:
+ dev_err(hdev->dev,
+ "User not allowed to use WREG_BULK\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ memcpy(kernel_pkt, user_pkt, pkt_size);
+ cb_patched_cur_length += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int goya_parse_cb_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ u32 patched_cb_size;
+ struct hl_cb *user_cb;
+ int rc;
+
+ /*
+ * The new CB should have space at the end for two MSG_PROT pkt:
+ * 1. A packet that will act as a completion packet
+ * 2. A packet that will generate MSI-X interrupt
+ */
+ parser->patched_cb_size = parser->user_cb_size +
+ sizeof(struct packet_msg_prot) * 2;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n",
+ rc);
+ return rc;
+ }
+
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * The check that parser->user_cb_size <= parser->user_cb->size was done
+ * in validate_queue_index().
+ */
+ memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
+ (void *) (uintptr_t) parser->user_cb->kernel_address,
+ parser->user_cb_size);
+
+ patched_cb_size = parser->patched_cb_size;
+
+ /* validate patched CB instead of user CB */
+ user_cb = parser->user_cb;
+ parser->user_cb = parser->patched_cb;
+ rc = goya_validate_cb(hdev, parser, true);
+ parser->user_cb = user_cb;
+
+ if (rc) {
+ hl_cb_put(parser->patched_cb);
+ goto out;
+ }
+
+ if (patched_cb_size != parser->patched_cb_size) {
+ dev_err(hdev->dev, "user CB size mismatch\n");
+ hl_cb_put(parser->patched_cb);
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ /*
+ * Always call cb destroy here because we still have 1 reference
+ * to it by calling cb_get earlier. After the job is completed,
+ * cb_put will release it, but here we want to remove it from the
+ * idr
+ */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+ return rc;
+}
+
+static int goya_parse_cb_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ int rc;
+
+ rc = goya_validate_cb(hdev, parser, false);
+
+ if (rc)
+ goto free_userptr;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n", rc);
+ goto free_userptr;
+ }
+
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = goya_patch_cb(hdev, parser);
+
+ if (rc)
+ hl_cb_put(parser->patched_cb);
+
+out:
+ /*
+ * Always call cb destroy here because we still have 1 reference
+ * to it by calling cb_get earlier. After the job is completed,
+ * cb_put will release it, but here we want to remove it from the
+ * idr
+ */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+free_userptr:
+ if (rc)
+ hl_userptr_delete_list(hdev, parser->job_userptr_list);
+ return rc;
+}
+
+static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
+ /* For internal queue jobs, just check if cb address is valid */
+ if (hl_mem_area_inside_range(
+ (u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->sram_user_base_address,
+ asic_prop->sram_end_address))
+ return 0;
+
+ if (hl_mem_area_inside_range(
+ (u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->dram_user_base_address,
+ asic_prop->dram_end_address))
+ return 0;
+
+ dev_err(hdev->dev,
+ "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
+ parser->user_cb, parser->user_cb_size);
+
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!parser->ext_queue)
+ return goya_parse_cb_no_ext_queue(hdev, parser);
+
+ if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
+ return goya_parse_cb_mmu(hdev, parser);
+ else
+ return goya_parse_cb_no_mmu(hdev, parser);
+}
+
+void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
+ u32 cq_val, u32 msix_vec)
+{
+ struct packet_msg_prot *cq_pkt;
+ u32 tmp;
+
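+ /*
+ * The CB ends with two MSG_PROT packets: the first writes the completion
+ * value to the CQ and the second triggers the MSI-X interrupt
+ */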
+ cq_pkt = (struct packet_msg_prot *) (uintptr_t)
+ (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
+
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_EB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(cq_val);
+ cq_pkt->addr = cpu_to_le64(cq_addr);
+
+ cq_pkt++;
+
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
+ cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
+}
+
+static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
+{
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
+}
+
+static void goya_restore_phase_topology(struct hl_device *hdev)
+{
+ int i, num_of_sob_in_longs, num_of_mon_in_longs;
+
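+ /* Zero all sync manager objects and monitor statuses */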
+ num_of_sob_in_longs =
+ ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
+
+ num_of_mon_in_longs =
+ ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
+
+ for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
+ WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
+
+ for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
+ WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
+
+ /* Flush all WREG to prevent race */
+ i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
+}
+
+/*
+ * goya_debugfs_read32 - read a 32bit value from a given device address
+ *
+ * @hdev: pointer to hl_device structure
+ * @addr: address in device
+ * @val: returned value
+ *
+ * In case of DDR address that is not mapped into the default aperture that
+ * the DDR bar exposes, the function will configure the iATU so that the DDR
+ * bar will be positioned at a base address that allows reading from the
+ * required address. Configuring the iATU during normal operation can
+ * lead to undefined behavior and therefore, should be done with extreme care
+ *
+ */
+static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ *val = RREG32(addr - CFG_BASE);
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
+
+ *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (!rc) {
+ *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
+ (MMU_PAGE_TABLES_ADDR &
+ ~(prop->dram_pci_bar_size - 0x1ull)));
+ }
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+/*
+ * goya_debugfs_write32 - write a 32bit value to a given device address
+ *
+ * @hdev: pointer to hl_device structure
+ * @addr: address in device
+ * @val: value to write
+ *
+ * In case of DDR address that is not mapped into the default aperture that
+ * the DDR bar exposes, the function will configure the iATU so that the DDR
+ * bar will be positioned at a base address that allows writing to the
+ * required address. Configuring the iATU during normal operation can
+ * lead to undefined behavior and therefore, should be done with extreme care
+ *
+ */
+static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ WREG32(addr - CFG_BASE, val);
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
+
+ writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (!rc) {
+ writel(val, hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
+ (MMU_PAGE_TABLES_ADDR &
+ ~(prop->dram_pci_bar_size - 0x1ull)));
+ }
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ return readq(hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - goya->ddr_bar_cur_addr));
+}
+
+static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - goya->ddr_bar_cur_addr));
+}
+
+static const char *_goya_get_event_desc(u16 event_type)
+{
+ switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
+ return "PCIe_dec";
+ case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
+ return "TPC%d_dec";
+ case GOYA_ASYNC_EVENT_ID_MME_WACS:
+ return "MME_wacs";
+ case GOYA_ASYNC_EVENT_ID_MME_WACSD:
+ return "MME_wacsd";
+ case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
+ return "CPU_axi_splitter";
+ case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
+ return "PSOC_axi_dec";
+ case GOYA_ASYNC_EVENT_ID_PSOC:
+ return "PSOC";
+ case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
+ return "TPC%d_krn_err";
+ case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
+ return "TPC%d_cq";
+ case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
+ return "TPC%d_qm";
+ case GOYA_ASYNC_EVENT_ID_MME_QM:
+ return "MME_qm";
+ case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
+ return "MME_cq";
+ case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
+ return "DMA%d_qm";
+ case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
+ return "DMA%d_ch";
+ default:
+ return "N/A";
+ }
+}
+
+static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
+{
+ u8 index;
+
+ switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
+ index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
+ index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
+ index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
+ index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
+ index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
+ index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ default:
+ snprintf(desc, size, _goya_get_event_desc(event_type));
+ break;
+ }
+}
+
+static void goya_print_razwi_info(struct hl_device *hdev)
+{
+ if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
+ dev_err(hdev->dev, "Illegal write to LBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
+ }
+
+ if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
+ dev_err(hdev->dev, "Illegal read from LBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
+ }
+
+ if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
+ dev_err(hdev->dev, "Illegal write to HBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
+ }
+
+ if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
+ dev_err(hdev->dev, "Illegal read from HBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
+ }
+}
+
+static void goya_print_mmu_error_info(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u64 addr;
+ u32 val;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
+ if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
+ addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
+
+ dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
+
+ WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
+ }
+}
+
+static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
+{
+ char desc[20] = "";
+
+ goya_get_event_desc(event_type, desc, sizeof(desc));
+ dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ event_type, desc);
+
+ goya_print_razwi_info(hdev);
+ goya_print_mmu_error_info(hdev);
+}
+
+static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
+ size_t irq_arr_size)
+{
+ struct armcp_unmask_irq_arr_packet *pkt;
+ size_t total_pkt_size;
+ long result;
+ int rc;
+
+ total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
+ irq_arr_size;
+
+ /* data should be aligned to 8 bytes in order for ArmCP to copy it */
+ total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
+
+ /* total_pkt_size is cast to u16 later on */
+ if (total_pkt_size > USHRT_MAX) {
+ dev_err(hdev->dev, "too many elements in IRQ array\n");
+ return -EINVAL;
+ }
+
+ pkt = kzalloc(total_pkt_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
+ memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+
+ pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
+ total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask IRQ array\n");
+
+ kfree(pkt);
+
+ return rc;
+}
+
+static int goya_soft_reset_late_init(struct hl_device *hdev)
+{
+ /*
+ * Unmask all IRQs since some could have been received
+ * during the soft reset
+ */
+ return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
+ sizeof(goya_non_fatal_events));
+}
+
+static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(event_type);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
+
+ return rc;
+}
+
+void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
+{
+ u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
+ >> EQ_CTL_EVENT_TYPE_SHIFT);
+ struct goya_device *goya = hdev->asic_specific;
+
+ goya->events_stat[event_type]++;
+
+ switch (event_type) {
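+ /* These errors are considered fatal - reset the device */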
+ case GOYA_ASYNC_EVENT_ID_PCIE_IF:
+ case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
+ case GOYA_ASYNC_EVENT_ID_MME_ECC:
+ case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
+ case GOYA_ASYNC_EVENT_ID_MMU_ECC:
+ case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
+ case GOYA_ASYNC_EVENT_ID_DMA_ECC:
+ case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
+ case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
+ case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
+ case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
+ case GOYA_ASYNC_EVENT_ID_GIC500:
+ case GOYA_ASYNC_EVENT_ID_PLL0:
+ case GOYA_ASYNC_EVENT_ID_PLL1:
+ case GOYA_ASYNC_EVENT_ID_PLL3:
+ case GOYA_ASYNC_EVENT_ID_PLL4:
+ case GOYA_ASYNC_EVENT_ID_PLL5:
+ case GOYA_ASYNC_EVENT_ID_PLL6:
+ case GOYA_ASYNC_EVENT_ID_AXI_ECC:
+ case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
+ dev_err(hdev->dev,
+ "Received H/W interrupt %d, reset the chip\n",
+ event_type);
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
+ case GOYA_ASYNC_EVENT_ID_MME_WACS:
+ case GOYA_ASYNC_EVENT_ID_MME_WACSD:
+ case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
+ case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
+ case GOYA_ASYNC_EVENT_ID_PSOC:
+ case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
+ case GOYA_ASYNC_EVENT_ID_MME_QM:
+ case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
+ case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
+ case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
+ goya_print_irq_info(hdev, event_type);
+ goya_unmask_irq(hdev, event_type);
+ break;
+
+ case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
+ dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
+ event_type);
+ break;
+ }
+}
+
+void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ *size = (u32) sizeof(goya->events_stat);
+
+ return goya->events_stat;
+}
+
+static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
+ u64 val, bool is_dram)
+{
+ struct packet_lin_dma *lin_dma_pkt;
+ struct hl_cs_parser parser;
+ struct hl_cs_job *job;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
+ int rc;
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb)
+ return -EFAULT;
+
+ lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
+
+ memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
+ cb_size = sizeof(*lin_dma_pkt);
+
+ ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
+ (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
+ (1 << GOYA_PKT_CTL_RB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT));
+ ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+ lin_dma_pkt->ctl = cpu_to_le32(ctl);
+
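+ /*
+ * In memset mode the source address field carries the 64-bit fill
+ * pattern rather than a host address, and tsize is the number of bytes
+ * to write starting at dst_addr.
+ */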
+ lin_dma_pkt->src_addr = cpu_to_le64(val);
+ lin_dma_pkt->dst_addr = cpu_to_le64(addr);
+ lin_dma_pkt->tsize = cpu_to_le32(size);
+
+ job = hl_cs_allocate_job(hdev, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
+
+ hl_debugfs_add_job(hdev, job);
+
+ parser.ctx_id = HL_KERNEL_ASID_ID;
+ parser.cs_sequence = 0;
+ parser.job_id = job->id;
+ parser.hw_queue_id = job->hw_queue_id;
+ parser.job_userptr_list = &job->userptr_list;
+ parser.user_cb = job->user_cb;
+ parser.user_cb_size = job->user_cb_size;
+ parser.ext_queue = job->ext_queue;
+ parser.use_virt_addr = hdev->mmu_enable;
+
+ rc = hdev->asic_funcs->cs_parser(hdev, &parser);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to parse kernel CB\n");
+ goto free_job;
+ }
+
+ job->patched_cb = parser.patched_cb;
+ job->job_cb_size = parser.patched_cb_size;
+ job->patched_cb->cs_cnt++;
+
+ rc = goya_send_job_on_qman0(hdev, job);
+
+ job->patched_cb->cs_cnt--;
+ hl_cb_put(job->patched_cb);
+
+free_job:
+ hl_userptr_delete_list(hdev, &job->userptr_list);
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ cb->cs_cnt--;
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
+
+static int goya_context_switch(struct hl_device *hdev, u32 asid)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 addr = prop->sram_base_address;
+ u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
+ u64 val = 0x7777777777777777ull;
+ int rc;
+
+ rc = goya_memset_device_memory(hdev, addr, size, val, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
+ return rc;
+ }
+
+ goya_mmu_prepare(hdev, asid);
+
+ return 0;
+}
+
+static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ u64 addr = prop->mmu_pgt_addr;
+ u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
+ MMU_CACHE_MNG_SIZE;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return 0;
+
+ return goya_memset_device_memory(hdev, addr, size, 0, true);
+}
+
+static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
+ u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
+ u64 val = 0x9999999999999999ull;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return 0;
+
+ return goya_memset_device_memory(hdev, addr, size, val, true);
+}
+
+static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
+ WARN(1, "asid %u is too big\n", asid);
+ return;
+ }
+
+ /* zero the MMBP and ASID bits and then set the ASID */
+ for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
+ WREG32_AND(goya_mmu_regs[i], ~0x7FF);
+ WREG32_OR(goya_mmu_regs[i], asid);
+ }
+}
+
+static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 status, timeout_usec;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ /* there is no need for an L1-only invalidation in Goya */
+ if (!is_hard)
+ return;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ /* L0 & L1 invalidation */
+ WREG32(mmSTLB_INV_ALL_START, 1);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_ALL_START,
+ status,
+ !status,
+ 1000,
+ timeout_usec);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc)
+ dev_notice_ratelimited(hdev->dev,
+ "Timeout when waiting for MMU cache invalidation\n");
+}
+
+static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
+ bool is_hard, u32 asid, u64 va, u64 size)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 status, timeout_usec, inv_data, pi;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ /* there is no need for an L1-only invalidation in Goya */
+ if (!is_hard)
+ return;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ /*
+ * TODO: currently this invalidates the entire L0 & L1, exactly like a
+ * regular hard invalidation. It should be changed to invalidate only the
+ * cache lines that match the given ASID, VA and size.
+ * Note that the L1 will be flushed entirely in any case.
+ */
+
+ /* L0 & L1 invalidation */
+ inv_data = RREG32(mmSTLB_CACHE_INV);
+ /* PI is 8 bit */
+ pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
+ WREG32(mmSTLB_CACHE_INV,
+ (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_CONSUMER_INDEX,
+ status,
+ status == pi,
+ 1000,
+ timeout_usec);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc)
+ dev_notice_ratelimited(hdev->dev,
+ "Timeout when waiting for MMU cache invalidation\n");
+}
+
+static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+ u64 phys_addr)
+{
+ u32 status, timeout_usec;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
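+ /*
+ * Program the hop0 page-table physical address of this ASID and kick
+ * the MMU by setting the busy bit (bit 31); the MMU is expected to
+ * clear that bit once it has consumed the update, which is polled for
+ * below.
+ */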
+ WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
+ WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
+ WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
+
+ rc = hl_poll_timeout(
+ hdev,
+ MMU_ASID_BUSY,
+ status,
+ !(status & 0x80000000),
+ 1000,
+ timeout_usec);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout during MMU hop0 config of asid %d\n", asid);
+ return rc;
+ }
+
+ return 0;
+}
+
+int goya_send_heartbeat(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct armcp_packet hb_pkt;
+ long result;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ memset(&hb_pkt, 0, sizeof(hb_pkt));
+
+ hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
+ sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
+ rc = -EIO;
+
+ return rc;
+}
+
+static int goya_armcp_info_get(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct armcp_packet pkt;
+ void *armcp_info_cpu_addr;
+ dma_addr_t armcp_info_dma_addr;
+ u64 dram_size;
+ long result;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ armcp_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ sizeof(struct armcp_info), &armcp_info_dma_addr);
+ if (!armcp_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for ArmCP info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(armcp_info_dma_addr +
+ prop->host_phys_base_address);
+ pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ GOYA_ARMCP_INFO_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send armcp info pkt, error %d\n", rc);
+ goto out;
+ }
+
+ memcpy(&prop->armcp_info, armcp_info_cpu_addr,
+ sizeof(prop->armcp_info));
+
+ dram_size = le64_to_cpu(prop->armcp_info.dram_size);
+ if (dram_size) {
+ if ((!is_power_of_2(dram_size)) ||
+ (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
+ dev_err(hdev->dev,
+ "F/W reported invalid DRAM size %llu. Trying to use default size\n",
+ dram_size);
+ dram_size = DRAM_PHYS_DEFAULT_SIZE;
+ }
+
+ prop->dram_size = dram_size;
+ prop->dram_end_address = prop->dram_base_address + dram_size;
+ }
+
+ rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to build hwmon channel info, error %d\n", rc);
+ rc = -EFAULT;
+ goto out;
+ }
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ sizeof(struct armcp_info), armcp_info_cpu_addr);
+
+ return rc;
+}
+
+static void goya_init_clock_gating(struct hl_device *hdev)
+{
+
+}
+
+static void goya_disable_clock_gating(struct hl_device *hdev)
+{
+
+}
+
+static bool goya_is_device_idle(struct hl_device *hdev)
+{
+ u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
+ int i;
+
+ offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
+
+ for (i = 0 ; i < DMA_MAX_NUM ; i++) {
+ dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
+
+ if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
+ DMA_QM_IDLE_MASK)
+ return false;
+ }
+
+ offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++) {
+ tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
+ tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
+ tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
+
+ if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
+ TPC_QM_IDLE_MASK)
+ return false;
+
+ if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
+ TPC_CMDQ_IDLE_MASK)
+ return false;
+
+ if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
+ TPC_CFG_IDLE_MASK)
+ return false;
+ }
+
+ if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
+ MME_QM_IDLE_MASK)
+ return false;
+
+ if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
+ MME_CMDQ_IDLE_MASK)
+ return false;
+
+ if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
+ MME_ARCH_IDLE_MASK)
+ return false;
+
+ if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
+ return false;
+
+ return true;
+}
+
+static void goya_hw_queues_lock(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ spin_lock(&goya->hw_queues_lock);
+}
+
+static void goya_hw_queues_unlock(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ spin_unlock(&goya->hw_queues_lock);
+}
+
+static u32 goya_get_pci_id(struct hl_device *hdev)
+{
+ return hdev->pdev->device;
+}
+
+static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
+ size_t max_size)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct armcp_packet pkt;
+ void *eeprom_info_cpu_addr;
+ dma_addr_t eeprom_info_dma_addr;
+ long result;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ eeprom_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ max_size, &eeprom_info_dma_addr);
+ if (!eeprom_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for EEPROM info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(eeprom_info_cpu_addr, 0, max_size);
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
+ prop->host_phys_base_address);
+ pkt.data_max_size = cpu_to_le32(max_size);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ GOYA_ARMCP_EEPROM_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send armcp EEPROM pkt, error %d\n", rc);
+ goto out;
+ }
+
+ /* result contains the actual size */
+ memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
+ eeprom_info_cpu_addr);
+
+ return rc;
+}
+
+static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
+{
+ return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
+}
+
+static const struct hl_asic_funcs goya_funcs = {
+ .early_init = goya_early_init,
+ .early_fini = goya_early_fini,
+ .late_init = goya_late_init,
+ .late_fini = goya_late_fini,
+ .sw_init = goya_sw_init,
+ .sw_fini = goya_sw_fini,
+ .hw_init = goya_hw_init,
+ .hw_fini = goya_hw_fini,
+ .halt_engines = goya_halt_engines,
+ .suspend = goya_suspend,
+ .resume = goya_resume,
+ .cb_mmap = goya_cb_mmap,
+ .ring_doorbell = goya_ring_doorbell,
+ .flush_pq_write = goya_flush_pq_write,
+ .dma_alloc_coherent = goya_dma_alloc_coherent,
+ .dma_free_coherent = goya_dma_free_coherent,
+ .get_int_queue_base = goya_get_int_queue_base,
+ .test_queues = goya_test_queues,
+ .dma_pool_zalloc = goya_dma_pool_zalloc,
+ .dma_pool_free = goya_dma_pool_free,
+ .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
+ .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
+ .hl_dma_unmap_sg = goya_dma_unmap_sg,
+ .cs_parser = goya_cs_parser,
+ .asic_dma_map_sg = goya_dma_map_sg,
+ .get_dma_desc_list_size = goya_get_dma_desc_list_size,
+ .add_end_of_cb_packets = goya_add_end_of_cb_packets,
+ .update_eq_ci = goya_update_eq_ci,
+ .context_switch = goya_context_switch,
+ .restore_phase_topology = goya_restore_phase_topology,
+ .debugfs_read32 = goya_debugfs_read32,
+ .debugfs_write32 = goya_debugfs_write32,
+ .add_device_attr = goya_add_device_attr,
+ .handle_eqe = goya_handle_eqe,
+ .set_pll_profile = goya_set_pll_profile,
+ .get_events_stat = goya_get_events_stat,
+ .read_pte = goya_read_pte,
+ .write_pte = goya_write_pte,
+ .mmu_invalidate_cache = goya_mmu_invalidate_cache,
+ .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
+ .send_heartbeat = goya_send_heartbeat,
+ .enable_clock_gating = goya_init_clock_gating,
+ .disable_clock_gating = goya_disable_clock_gating,
+ .is_device_idle = goya_is_device_idle,
+ .soft_reset_late_init = goya_soft_reset_late_init,
+ .hw_queues_lock = goya_hw_queues_lock,
+ .hw_queues_unlock = goya_hw_queues_unlock,
+ .get_pci_id = goya_get_pci_id,
+ .get_eeprom_data = goya_get_eeprom_data,
+ .send_cpu_message = goya_send_cpu_message,
+ .get_hw_state = goya_get_hw_state
+};
+
+/*
+ * goya_set_asic_funcs - set Goya function pointers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+void goya_set_asic_funcs(struct hl_device *hdev)
+{
+ hdev->asic_funcs = &goya_funcs;
+}
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
new file mode 100644
index 000000000000..830551b6b062
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYAP_H_
+#define GOYAP_H_
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+#include "include/hl_boot_if.h"
+#include "include/goya/goya_packets.h"
+#include "include/goya/goya.h"
+#include "include/goya/goya_async_events.h"
+#include "include/goya/goya_fw_if.h"
+
+#define NUMBER_OF_CMPLT_QUEUES 5
+#define NUMBER_OF_EXT_HW_QUEUES 5
+#define NUMBER_OF_CPU_HW_QUEUES 1
+#define NUMBER_OF_INT_HW_QUEUES 9
+#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \
+ NUMBER_OF_CPU_HW_QUEUES + \
+ NUMBER_OF_INT_HW_QUEUES)
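+/* currently 5 external + 1 CPU + 9 internal = 15 H/W queues */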
+
+/*
+ * Number of MSI-X interrupt IDs:
+ * Each completion queue has 1 ID
+ * The event queue has 1 ID
+ */
+#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1)
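+/* currently 5 completion queue IDs + 1 event queue ID = 6 MSI-X vectors */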
+
+#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES)
+#error "Number of H/W queues must be smaller than HL_MAX_QUEUES"
+#endif
+
+#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES)
+#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
+#endif
+
+#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
+
+#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
+
+#define TPC_ENABLED_MASK 0xFF
+
+#define PLL_HIGH_DEFAULT 1575000000 /* 1.575 GHz */
+
+#define MAX_POWER_DEFAULT 200000 /* 200W */
+
+#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */
+#define GOYA_ARMCP_EEPROM_TIMEOUT 10000000 /* 10s */
+
+#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
+
+/* DRAM Memory Map */
+
+#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
+#define MMU_PAGE_TABLES_SIZE 0x0DE00000 /* 222MB */
+#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */
+#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */
+#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */
+#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */
+
+#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
+#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
+#define MMU_DRAM_DEFAULT_PAGE_ADDR (MMU_PAGE_TABLES_ADDR + \
+ MMU_PAGE_TABLES_SIZE)
+#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
+ MMU_DRAM_DEFAULT_PAGE_SIZE)
+#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + \
+ MMU_CACHE_MNG_SIZE)
+#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE)
+#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE)
+
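+/*
+ * The regions above add up to 0x20000000 bytes (512MB): 256MB F/W image +
+ * 222MB page tables + 2MB default page + 4KB cache management + 4KB CPU PQ
+ * packets + (32MB - 8KB) CPU PQ data. The check below therefore holds as
+ * long as DRAM_PHYS_BASE is 0.
+ */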
+#if (DRAM_BASE_ADDR_USER != 0x20000000)
+#error "KMD must reserve 512MB"
+#endif
+
+/*
+ * SRAM Memory Map for KMD
+ *
+ * KMD occupies KMD_SRAM_SIZE bytes from the start of SRAM. It is used for
+ * the MME/TPC QMANs.
+ *
+ */
+
+#define MME_QMAN_BASE_OFFSET 0x000000 /* Must be 0 */
+#define MME_QMAN_LENGTH 64
+#define TPC_QMAN_LENGTH 64
+
+#define TPC0_QMAN_BASE_OFFSET (MME_QMAN_BASE_OFFSET + \
+ (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC1_QMAN_BASE_OFFSET (TPC0_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC2_QMAN_BASE_OFFSET (TPC1_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC3_QMAN_BASE_OFFSET (TPC2_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC4_QMAN_BASE_OFFSET (TPC3_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC5_QMAN_BASE_OFFSET (TPC4_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC6_QMAN_BASE_OFFSET (TPC5_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+
+#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+
+#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
+#error "MME/TPC QMANs SRAM space exceeds limit"
+#endif
+
+#define SRAM_USER_BASE_OFFSET GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START
+
+/* Virtual address space */
+#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */
+#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 512GB */
+#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \
+ VA_HOST_SPACE_START) /* 767.5TB */
+
+#define VA_DDR_SPACE_START 0x800000000ull /* 32GB */
+#define VA_DDR_SPACE_END 0x2000000000ull /* 128GB */
+#define VA_DDR_SPACE_SIZE (VA_DDR_SPACE_END - \
+ VA_DDR_SPACE_START) /* 96GB */
+
+#define DMA_MAX_TRANSFER_SIZE U32_MAX
+
+#define HW_CAP_PLL 0x00000001
+#define HW_CAP_DDR_0 0x00000002
+#define HW_CAP_DDR_1 0x00000004
+#define HW_CAP_MME 0x00000008
+#define HW_CAP_CPU 0x00000010
+#define HW_CAP_DMA 0x00000020
+#define HW_CAP_MSIX 0x00000040
+#define HW_CAP_CPU_Q 0x00000080
+#define HW_CAP_MMU 0x00000100
+#define HW_CAP_TPC_MBIST 0x00000200
+#define HW_CAP_GOLDEN 0x00000400
+#define HW_CAP_TPC 0x00000800
+
+#define CPU_PKT_SHIFT 5
+#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT)
+#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1))
+#define CPU_MAX_PKTS_IN_CB 32
+#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB)
+#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE)
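+/*
+ * i.e. 32-byte packets and 32 packets per CB give a 1KB CB, so the CPU
+ * accessible memory spans HL_QUEUE_LENGTH KB in total.
+ */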
+
+enum goya_fw_component {
+ FW_COMP_UBOOT,
+ FW_COMP_PREBOOT
+};
+
+struct goya_device {
+ int (*test_cpu_queue)(struct hl_device *hdev);
+ int (*armcp_info_get)(struct hl_device *hdev);
+
+ /* TODO: remove hw_queues_lock after moving to scheduler code */
+ spinlock_t hw_queues_lock;
+
+ u64 mme_clk;
+ u64 tpc_clk;
+ u64 ic_clk;
+
+ u64 ddr_bar_cur_addr;
+ u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE];
+ u32 hw_cap_initialized;
+};
+
+int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus,
+ u8 i2c_addr, u8 i2c_reg, u32 *val);
+int goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus,
+ u8 i2c_addr, u8 i2c_reg, u32 val);
+int goya_test_cpu_queue(struct hl_device *hdev);
+int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
+ u32 timeout, long *result);
+long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
+void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
+ long value);
+void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
+void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
+void goya_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
+void goya_init_security(struct hl_device *hdev);
+u64 goya_get_max_power(struct hl_device *hdev);
+void goya_set_max_power(struct hl_device *hdev, u64 value);
+
+int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
+void goya_late_fini(struct hl_device *hdev);
+int goya_suspend(struct hl_device *hdev);
+int goya_resume(struct hl_device *hdev);
+void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
+void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
+void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
+ u32 cq_val, u32 msix_vec);
+int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
+void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
+ dma_addr_t *dma_handle, u16 *queue_len);
+u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
+int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
+int goya_send_heartbeat(struct hl_device *hdev);
+
+#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
new file mode 100644
index 000000000000..088692c852b6
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+
+void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ switch (freq) {
+ case PLL_HIGH:
+ hl_set_frequency(hdev, MME_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, TPC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, IC_PLL, hdev->high_pll);
+ break;
+ case PLL_LOW:
+ hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW);
+ break;
+ case PLL_LAST:
+ hl_set_frequency(hdev, MME_PLL, goya->mme_clk);
+ hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk);
+ hl_set_frequency(hdev, IC_PLL, goya->ic_clk);
+ break;
+ default:
+ dev_err(hdev->dev, "unknown frequency setting\n");
+ }
+}
+
+static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+ unsigned long value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ count = -EPERM;
+ goto fail;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ hl_set_frequency(hdev, MME_PLL, value);
+ goya->mme_clk = value;
+
+fail:
+ return count;
+}
+
+static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, TPC_PLL, false);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+ unsigned long value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ count = -EPERM;
+ goto fail;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ hl_set_frequency(hdev, TPC_PLL, value);
+ goya->tpc_clk = value;
+
+fail:
+ return count;
+}
+
+static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, IC_PLL, false);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+ unsigned long value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ count = -EPERM;
+ goto fail;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ hl_set_frequency(hdev, IC_PLL, value);
+ goya->ic_clk = value;
+
+fail:
+ return count;
+}
+
+static ssize_t mme_clk_curr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t tpc_clk_curr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, TPC_PLL, true);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t ic_clk_curr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, IC_PLL, true);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static DEVICE_ATTR_RW(ic_clk);
+static DEVICE_ATTR_RO(ic_clk_curr);
+static DEVICE_ATTR_RW(mme_clk);
+static DEVICE_ATTR_RO(mme_clk_curr);
+static DEVICE_ATTR_RW(tpc_clk);
+static DEVICE_ATTR_RO(tpc_clk_curr);
+
+static struct attribute *goya_dev_attrs[] = {
+ &dev_attr_ic_clk.attr,
+ &dev_attr_ic_clk_curr.attr,
+ &dev_attr_mme_clk.attr,
+ &dev_attr_mme_clk_curr.attr,
+ &dev_attr_tpc_clk.attr,
+ &dev_attr_tpc_clk_curr.attr,
+ NULL,
+};
+
+void goya_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp)
+{
+ dev_attr_grp->attrs = goya_dev_attrs;
+}
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
new file mode 100644
index 000000000000..575003238401
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -0,0 +1,2999 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+
+/*
+ * goya_pb_set_block - set the given block as protected
+ *
+ * @hdev: pointer to hl_device structure
+ * @base: block base address
+ *
+ */
+static void goya_pb_set_block(struct hl_device *hdev, u64 base)
+{
+ u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
+
+ while (pb_addr & 0xFFF) {
+ WREG32(pb_addr, 0);
+ pb_addr += 4;
+ }
+}
+
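+/*
+ * Each register has a protection bit in a shadow region at offset
+ * PROT_BITS_OFFS of its block: ((reg & PROT_BITS_OFFS) >> 7) << 2 selects
+ * the 32-bit protection word and (reg & 0x7F) >> 2 selects the bit within
+ * it, so one word covers 32 consecutive registers. Going by the helper
+ * above, a cleared bit marks a register as protected, so writing ~mask
+ * protects only the registers collected in mask and leaves the rest of
+ * the word unprotected.
+ */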
+static void goya_init_mme_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ /* TODO: change to the real register names when SoC Online is updated */
+ u64 mmMME_SBB_POWER_ECO1 = 0xDFF60,
+ mmMME_SBB_POWER_ECO2 = 0xDFF64;
+
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_0_BASE);
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_1_BASE);
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_2_BASE);
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_3_BASE);
+
+ goya_pb_set_block(hdev, mmSBA_ECC_MEM_BASE);
+ goya_pb_set_block(hdev, mmSBB_ECC_MEM_BASE);
+
+ goya_pb_set_block(hdev, mmMME1_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME1_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME1_WR_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME2_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME2_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME2_WR_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME3_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME3_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME3_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmMME4_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME4_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME4_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmMME5_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME5_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME5_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmMME6_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME6_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME6_WR_REGULATOR_BASE);
+
+ pb_addr = (mmMME_DUMMY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_DUMMY & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_DUMMY & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_LOG_SHADOW & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_STORE_MAX_CREDIT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_STORE_MAX_CREDIT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_STORE_MAX_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_AGU & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBB & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_WBC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBA_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBB_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBC_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_WBC_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_TE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_TE2DEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_REI_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_REI_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SEI_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SEI_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SPI_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SPI_MASK & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_CP_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_CP_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BUF_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_CQ_IFIFO_CNT &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_SBB_POWER_ECO1 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_SBB_POWER_ECO1 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_SBB_POWER_ECO1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBB_POWER_ECO2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
+
+static void goya_init_dma_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ goya_pb_set_block(hdev, mmDMA_NRTR_BASE);
+ goya_pb_set_block(hdev, mmDMA_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmDMA_WR_REGULATOR_BASE);
+
+ pb_addr = (mmDMA_QM_0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_0_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_0_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_0_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_0_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_0_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_0_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_0_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_0_BASE);
+
+ pb_addr = (mmDMA_QM_1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_1_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_1_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_1_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_1_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_1_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_1_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_1_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_1_BASE);
+
+ pb_addr = (mmDMA_QM_2_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_2_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_2_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_2_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_2_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_2_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_2_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_2_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_2_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_2_BASE);
+
+ pb_addr = (mmDMA_QM_3_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_3_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_3_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_3_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_3_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_3_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_3_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_3_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_3_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_3_BASE);
+
+ pb_addr = (mmDMA_QM_4_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_4_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_4_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_4_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_4_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_4_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_4_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_4_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_4_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_4_BASE);
+}
+
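+/*
+ * goya_init_tpc_protection_bits - configure the protection bits of the
+ * TPC engines' CFG, QMAN and CMDQ register blocks, one protection word
+ * at a time (same scheme as the DMA QMANs above).
+ */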
+static void goya_init_tpc_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
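+ /*
+ * The pb_addr/word_offset/mask groups below each program one 32-bit
+ * word of protection bits: pb_addr is the protection-bits area
+ * (PROT_BITS_OFFS) of the 4 KB page holding the registers,
+ * word_offset selects the word covering their 0x80-byte group, and
+ * mask collects one bit per 32-bit register in that group. Writing
+ * ~mask clears only the bits of the registers listed above the
+ * write, presumably leaving just those registers privileged.
+ */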
+ goya_pb_set_block(hdev, mmTPC0_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC0_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC1_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC2_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC3_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC4_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC5_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC6_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC7_NRTR_BASE);
+ goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
+
+/*
+ * goya_init_protection_bits - Initialize protection bits for specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, meaning the register is not
+ * protected. Each bit that belongs to a protected register must be cleared
+ * (set to 0).
+ *
+ */
+static void goya_init_protection_bits(struct hl_device *hdev)
+{
+ /*
+ * In each 4K block of registers, the last 128 bytes are protection
+ * bits - a total of 1024 bits, one for each register in the block. Each
+ * bit corresponds to a specific register, in register order.
+ * So to find the bit that protects a given register, we calculate its
+ * word offset inside the protection area and then the exact bit inside
+ * that word (a word is 4 bytes).
+ *
+ * Register address:
+ *
+ * 31                  12 11         7 6              2 1     0
+ * -------------------------------------------------------------
+ * |       Don't         |    word    |  bit location  |   0   |
+ * |       care          |   offset   |  inside word   |       |
+ * -------------------------------------------------------------
+ *
+ * Bits 7-11 represent the word offset inside the 128 bytes.
+ * Bits 2-6 represent the bit location inside the word.
+ */
+
+ goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
+ goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmPCI_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y0_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y1_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y2_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y3_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y4_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y5_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmPCIE_WRAP_BASE);
+ goya_pb_set_block(hdev, mmPCIE_CORE_BASE);
+ goya_pb_set_block(hdev, mmPCIE_DB_CFG_BASE);
+ goya_pb_set_block(hdev, mmPCIE_DB_CMD_BASE);
+ goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
+ goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
+ goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
+
+ goya_init_mme_protection_bits(hdev);
+
+ goya_init_dma_protection_bits(hdev);
+
+ goya_init_tpc_protection_bits(hdev);
+}
+
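
For illustration only, and not part of the patch itself: a minimal standalone
sketch of the word-offset/bit arithmetic that the layout comment inside
goya_init_protection_bits() describes, and that every pb_addr/word_offset/mask
sequence above repeats. The helper name and the small main() harness are
hypothetical, and PROT_BITS_OFFS is assumed here to be 0xF80 (the last 128
bytes of each 4K block), which is consistent with how this file uses it both
as an offset and as a bit mask.

#include <stdio.h>
#include <stdint.h>

/* Assumption: the protection area is the last 128 bytes of each 4K block. */
#define PROT_BITS_OFFS	0xF80

/*
 * Illustrative helper: locate the protection bit of a register, following
 * the register-address layout described in goya_init_protection_bits().
 * The driver ORs such one-hot masks together and then writes the complement,
 * i.e. WREG32(pb_addr + word_offset, ~mask).
 */
static void prot_bit_location(uint32_t reg_addr)
{
	/* Base of the protection area of the 4K block holding this register */
	uint32_t pb_addr     = (reg_addr & ~0xFFFu) + PROT_BITS_OFFS;
	/* Bits 11:7 select the 32-bit word inside the 128-byte area */
	uint32_t word_offset = ((reg_addr & PROT_BITS_OFFS) >> 7) << 2;
	/* Bits 6:2 select the bit inside that word */
	uint32_t bit         = (reg_addr & 0x7F) >> 2;

	printf("reg 0x%08x -> prot word at 0x%08x, bit %u (one-hot mask 0x%08x)\n",
	       (unsigned)reg_addr, (unsigned)(pb_addr + word_offset),
	       (unsigned)bit, 1u << bit);
}

int main(void)
{
	/* Hypothetical register addresses, only to exercise the arithmetic */
	prot_bit_location(0x00E06044);
	prot_bit_location(0x00E060C8);
	return 0;
}
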
+/*
+ * goya_init_security - Initialize security model
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the security model of the device. This includes the range
+ * registers and a protection bit per register.
+ *
+ */
+void goya_init_security(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
+ u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
+
+ u32 lbw_rng0_base = 0xFC440000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng0_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng1_base = 0xFC480000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng1_mask = 0xFFF80000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng2_base = 0xFC600000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng2_mask = 0xFFE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng3_base = 0xFC800000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng3_mask = 0xFFF00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng4_base = 0xFCC02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng4_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng5_base = 0xFCC40000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng5_mask = 0xFFFF8000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng6_base = 0xFCC48000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng6_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng7_base = 0xFCC4A000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng7_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng8_base = 0xFCC4C000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng8_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng9_base = 0xFCC50000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng9_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng13_base = 0xFEC43000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng13_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
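
A note on reading these base/mask pairs (an assumption, since the matching
rule is not spelled out in this hunk): if an LBW address matches a range when
(addr & mask) == base, then range 0, with base 0xFC440000 and mask 0xFFFF0000,
would describe a 64 KB window starting at 0xFC440000 (the clear low 16 bits of
the mask set the window size), and range 2, with mask 0xFFE00000, a 2 MB
window at 0xFC600000.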
+
+ WREG32(mmDMA_MACRO_LBW_RANGE_HIT_BLOCK, 0xFFFF);
+ WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFF);
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
+ WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_0, 0);
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_0, 0);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_0, 0);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_0, 0xFFF80);
+ }
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_1, dram_addr_lo);
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_1, dram_addr_hi);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_1, 0xE0000000);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_1, 0x3FFFF);
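
As a quick check of the "first 512MB" comment (a reading of the register
names, not stated in the patch): the HBW mask is split across bits 31:0 and
49:32, so 0x3FFFF in the high half together with 0xE0000000 in the low half
gives a combined mask of 0x3FFFFE0000000, which fixes address bits 49:29 and
leaves the low 29 bits free, i.e. a 2^29 byte (512 MB) window starting at the
DRAM base address programmed into the range base registers above.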
+
+ /* Protect registers */
+
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME1_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME2_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME3_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME4_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME5_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME6_RTR_LBW_RANGE_HIT, 0xFFFF);
+
+ WREG32(mmMME1_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME2_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME3_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME4_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME5_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME6_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC0_NRTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC1_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC1_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC2_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC2_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC3_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC3_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC4_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC4_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC5_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC5_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC6_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC6_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC7_NRTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ goya_init_protection_bits(hdev);
+}
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
new file mode 100644
index 000000000000..a7c95e9f9b9a
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -0,0 +1,1464 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HABANALABSP_H_
+#define HABANALABSP_H_
+
+#include "include/armcp_if.h"
+#include "include/qman_if.h"
+
+#define pr_fmt(fmt) "habanalabs: " fmt
+
+#include <linux/cdev.h>
+#include <linux/iopoll.h>
+#include <linux/irqreturn.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
+#include <linux/hashtable.h>
+
+#define HL_NAME "habanalabs"
+
+#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT)
+
+#define HL_PENDING_RESET_PER_SEC 5
+
+#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
+
+#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
+
+#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
+
+#define HL_MAX_QUEUES 128
+
+#define HL_MAX_JOBS_PER_CS 64
+
+/* MUST BE POWER OF 2 and larger than 1 */
+#define HL_MAX_PENDING_CS 64
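+
+/*
+ * Illustrative sketch (not mandated by the code itself): a power-of-2 value
+ * lets a 64-bit CS sequence number be mapped to a pending slot with a simple
+ * mask instead of a modulo, e.g.:
+ *
+ *     fence = ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)];
+ */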
+
+/* Memory */
+#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+
+/* MMU */
+#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+
+/**
+ * struct pgt_info - MMU hop page info.
+ * @node: hash linked-list node for the pgts hash table.
+ * @addr: physical address of the pgt.
+ * @ctx: pointer to the owner ctx.
+ * @num_of_ptes: indicates how many ptes are used in the pgt.
+ *
+ * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
+ * is needed during mapping, a new page is allocated and this structure holds
+ * its essential information. During unmapping, if no valid PTEs remain in the
+ * page, it is freed together with its pgt_info structure.
+ */
+struct pgt_info {
+ struct hlist_node node;
+ u64 addr;
+ struct hl_ctx *ctx;
+ int num_of_ptes;
+};
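+
+/*
+ * Lookup sketch (illustrative only; the real helpers live in the MMU code):
+ * a pgt_info is found by hashing the hop's physical address into the owner
+ * context's mmu_hash table, along the lines of:
+ *
+ *     struct pgt_info *pgt;
+ *
+ *     hash_for_each_possible(ctx->mmu_hash, pgt, node, addr)
+ *             if (pgt->addr == addr)
+ *                     return pgt;
+ */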
+
+struct hl_device;
+struct hl_fpriv;
+
+/**
+ * enum hl_queue_type - Supported QUEUE types.
+ * @QUEUE_TYPE_NA: queue is not available.
+ * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
+ * host.
+ * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
+ * memories and/or operates the compute engines.
+ * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
+ */
+enum hl_queue_type {
+ QUEUE_TYPE_NA,
+ QUEUE_TYPE_EXT,
+ QUEUE_TYPE_INT,
+ QUEUE_TYPE_CPU
+};
+
+/**
+ * struct hw_queue_properties - queue information.
+ * @type: queue type.
+ * @kmd_only: true if only KMD is allowed to send a job to this queue, false
+ * otherwise.
+ */
+struct hw_queue_properties {
+ enum hl_queue_type type;
+ u8 kmd_only;
+};
+
+/**
+ * enum vm_type_t - virtual memory mapping request information.
+ * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
+ * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
+ */
+enum vm_type_t {
+ VM_TYPE_USERPTR,
+ VM_TYPE_PHYS_PACK
+};
+
+/**
+ * enum hl_device_hw_state - H/W device state. Use this to decide whether a
+ * reset is needed before hw_init.
+ * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
+ * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
+ * hw_init
+ */
+enum hl_device_hw_state {
+ HL_DEVICE_HW_STATE_CLEAN = 0,
+ HL_DEVICE_HW_STATE_DIRTY
+};
+
+/**
+ * struct asic_fixed_properties - ASIC specific immutable properties.
+ * @hw_queues_props: H/W queues properties.
+ * @armcp_info: received various information from ArmCP regarding the H/W. e.g.
+ * available sensors.
+ * @uboot_ver: F/W U-boot version.
+ * @preboot_ver: F/W Preboot version.
+ * @sram_base_address: SRAM physical start address.
+ * @sram_end_address: SRAM physical end address.
+ * @sram_user_base_address: SRAM physical start address for user access.
+ * @dram_base_address: DRAM physical start address.
+ * @dram_end_address: DRAM physical end address.
+ * @dram_user_base_address: DRAM physical start address for user access.
+ * @dram_size: DRAM total size.
+ * @dram_pci_bar_size: size of PCI bar towards DRAM.
+ * @host_phys_base_address: base physical address of host memory for
+ * transactions that the device generates.
+ * @max_power_default: max power of the device after reset
+ * @va_space_host_start_address: base address of virtual memory range for
+ * mapping host memory.
+ * @va_space_host_end_address: end address of virtual memory range for
+ * mapping host memory.
+ * @va_space_dram_start_address: base address of virtual memory range for
+ * mapping DRAM memory.
+ * @va_space_dram_end_address: end address of virtual memory range for
+ * mapping DRAM memory.
+ * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
+ * fault.
+ * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
+ * @mmu_dram_default_page_addr: DRAM default page physical address.
+ * @mmu_pgt_size: MMU page tables total size.
+ * @mmu_pte_size: PTE size in MMU page tables.
+ * @mmu_hop_table_size: MMU hop table size.
+ * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
+ * @dram_page_size: page size for MMU DRAM allocation.
+ * @cfg_size: configuration space size on SRAM.
+ * @sram_size: total size of SRAM.
+ * @max_asid: maximum number of open contexts (ASIDs).
+ * @num_of_events: number of possible internal H/W IRQs.
+ * @psoc_pci_pll_nr: PCI PLL NR value.
+ * @psoc_pci_pll_nf: PCI PLL NF value.
+ * @psoc_pci_pll_od: PCI PLL OD value.
+ * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
+ * @completion_queues_count: number of completion queues.
+ * @high_pll: high PLL frequency used by the device.
+ * @cb_pool_cb_cnt: number of CBs in the CB pool.
+ * @cb_pool_cb_size: size of each CB in the CB pool.
+ * @tpc_enabled_mask: which TPCs are enabled.
+ */
+struct asic_fixed_properties {
+ struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
+ struct armcp_info armcp_info;
+ char uboot_ver[VERSION_MAX_LEN];
+ char preboot_ver[VERSION_MAX_LEN];
+ u64 sram_base_address;
+ u64 sram_end_address;
+ u64 sram_user_base_address;
+ u64 dram_base_address;
+ u64 dram_end_address;
+ u64 dram_user_base_address;
+ u64 dram_size;
+ u64 dram_pci_bar_size;
+ u64 host_phys_base_address;
+ u64 max_power_default;
+ u64 va_space_host_start_address;
+ u64 va_space_host_end_address;
+ u64 va_space_dram_start_address;
+ u64 va_space_dram_end_address;
+ u64 dram_size_for_default_page_mapping;
+ u64 mmu_pgt_addr;
+ u64 mmu_dram_default_page_addr;
+ u32 mmu_pgt_size;
+ u32 mmu_pte_size;
+ u32 mmu_hop_table_size;
+ u32 mmu_hop0_tables_total_size;
+ u32 dram_page_size;
+ u32 cfg_size;
+ u32 sram_size;
+ u32 max_asid;
+ u32 num_of_events;
+ u32 psoc_pci_pll_nr;
+ u32 psoc_pci_pll_nf;
+ u32 psoc_pci_pll_od;
+ u32 psoc_pci_pll_div_factor;
+ u32 high_pll;
+ u32 cb_pool_cb_cnt;
+ u32 cb_pool_cb_size;
+ u8 completion_queues_count;
+ u8 tpc_enabled_mask;
+};
+
+/**
+ * struct hl_dma_fence - wrapper for fence object used by command submissions.
+ * @base_fence: kernel fence object.
+ * @lock: spinlock to protect fence.
+ * @hdev: habanalabs device structure.
+ * @cs_seq: command submission sequence number.
+ */
+struct hl_dma_fence {
+ struct dma_fence base_fence;
+ spinlock_t lock;
+ struct hl_device *hdev;
+ u64 cs_seq;
+};
+
+/*
+ * Command Buffers
+ */
+
+#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
+
+/**
+ * struct hl_cb_mgr - describes a Command Buffer Manager.
+ * @cb_lock: protects cb_handles.
+ * @cb_handles: an idr to hold all command buffer handles.
+ */
+struct hl_cb_mgr {
+ spinlock_t cb_lock;
+ struct idr cb_handles; /* protected by cb_lock */
+};
+
+/**
+ * struct hl_cb - describes a Command Buffer.
+ * @refcount: reference counter for usage of the CB.
+ * @hdev: pointer to device this CB belongs to.
+ * @lock: spinlock to protect mmap/cs flows.
+ * @debugfs_list: node in debugfs list of command buffers.
+ * @pool_list: node in pool list of command buffers.
+ * @kernel_address: Holds the CB's kernel virtual address.
+ * @bus_address: Holds the CB's DMA address.
+ * @mmap_size: Holds the CB's size that was mmaped.
+ * @size: holds the CB's size.
+ * @id: the CB's ID.
+ * @cs_cnt: holds number of CS that this CB participates in.
+ * @ctx_id: holds the ID of the owner's context.
+ * @mmap: true if the CB is currently mmaped to user.
+ * @is_pool: true if CB was acquired from the pool, false otherwise.
+ */
+struct hl_cb {
+ struct kref refcount;
+ struct hl_device *hdev;
+ spinlock_t lock;
+ struct list_head debugfs_list;
+ struct list_head pool_list;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 mmap_size;
+ u32 size;
+ u32 id;
+ u32 cs_cnt;
+ u32 ctx_id;
+ u8 mmap;
+ u8 is_pool;
+};
+
+
+/*
+ * QUEUES
+ */
+
+struct hl_cs_job;
+
+/*
+ * Currently, there are two limitations on the maximum length of a queue:
+ *
+ * 1. The memory footprint of the queue. The current allocated space for the
+ * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
+ * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
+ * which currently is 4096/16 = 256 entries.
+ *
+ * To increase that, we need either to decrease the size of the
+ * BD (difficult), or allocate more than a single page (easier).
+ *
+ * 2. Because the size of the JOB handle field in the BD CTL / completion queue
+ * is 10-bit, we can have up to 1024 open jobs per hardware queue.
+ * Therefore, each queue can hold up to 1024 entries.
+ *
+ * HL_QUEUE_LENGTH is in units of struct hl_bd.
+ * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
+ */
+
+#define HL_PAGE_SIZE 4096 /* minimum page size */
+/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
+#define HL_QUEUE_LENGTH 256
+#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
+
+/*
+ * HL_CQ_LENGTH is in units of struct hl_cq_entry.
+ * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
+ */
+#define HL_CQ_LENGTH HL_QUEUE_LENGTH
+#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
+
+/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
+#define HL_EQ_LENGTH 64
+#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
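+
+/*
+ * Sanity-check sketch (illustrative only; HL_BD_SIZE and HL_EQ_ENTRY_SIZE are
+ * assumed to come from the included queue interface headers, and
+ * is_power_of_2() needs <linux/log2.h>): the constraints described above can
+ * be expressed as compile-time checks, e.g.:
+ *
+ *     BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
+ *     BUILD_BUG_ON(!is_power_of_2(HL_QUEUE_LENGTH));
+ */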
+
+
+/**
+ * struct hl_hw_queue - describes a H/W transport queue.
+ * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
+ * @queue_type: type of queue.
+ * @kernel_address: holds the queue's kernel virtual address.
+ * @bus_address: holds the queue's DMA address.
+ * @pi: holds the queue's pi value.
+ * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
+ * @hw_queue_id: the id of the H/W queue.
+ * @int_queue_len: length of internal queue (number of entries).
+ * @valid: is the queue valid (we have an array of 32 queues, not all of them
+ * exist).
+ */
+struct hl_hw_queue {
+ struct hl_cs_job **shadow_queue;
+ enum hl_queue_type queue_type;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 pi;
+ u32 ci;
+ u32 hw_queue_id;
+ u16 int_queue_len;
+ u8 valid;
+};
+
+/**
+ * struct hl_cq - describes a completion queue
+ * @hdev: pointer to the device structure
+ * @kernel_address: holds the queue's kernel virtual address
+ * @bus_address: holds the queue's DMA address
+ * @hw_queue_id: the id of the matching H/W queue
+ * @ci: ci inside the queue
+ * @pi: pi inside the queue
+ * @free_slots_cnt: counter of free slots in queue
+ */
+struct hl_cq {
+ struct hl_device *hdev;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 hw_queue_id;
+ u32 ci;
+ u32 pi;
+ atomic_t free_slots_cnt;
+};
+
+/**
+ * struct hl_eq - describes the event queue (single one per device)
+ * @hdev: pointer to the device structure
+ * @kernel_address: holds the queue's kernel virtual address
+ * @bus_address: holds the queue's DMA address
+ * @ci: ci inside the queue
+ */
+struct hl_eq {
+ struct hl_device *hdev;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 ci;
+};
+
+
+/*
+ * ASICs
+ */
+
+/**
+ * enum hl_asic_type - supported ASIC types.
+ * @ASIC_AUTO_DETECT: ASIC type will be automatically set.
+ * @ASIC_GOYA: Goya device.
+ * @ASIC_INVALID: Invalid ASIC type.
+ */
+enum hl_asic_type {
+ ASIC_AUTO_DETECT,
+ ASIC_GOYA,
+ ASIC_INVALID
+};
+
+struct hl_cs_parser;
+
+/**
+ * enum hl_pm_mng_profile - power management profile.
+ * @PM_AUTO: internal clock is set by KMD.
+ * @PM_MANUAL: internal clock is set by the user.
+ * @PM_LAST: last power management type.
+ */
+enum hl_pm_mng_profile {
+ PM_AUTO = 1,
+ PM_MANUAL,
+ PM_LAST
+};
+
+/**
+ * enum hl_pll_frequency - PLL frequency.
+ * @PLL_HIGH: high frequency.
+ * @PLL_LOW: low frequency.
+ * @PLL_LAST: last frequency values that were configured by the user.
+ */
+enum hl_pll_frequency {
+ PLL_HIGH = 1,
+ PLL_LOW,
+ PLL_LAST
+};
+
+/**
+ * struct hl_asic_funcs - ASIC specific functions that can be called from
+ * common code.
+ * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
+ * @early_fini: tears down what was done in early_init.
+ * @late_init: sets up late driver/hw state (post hw_init) - Optional.
+ * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
+ * @sw_init: sets up driver state, does not configure H/W.
+ * @sw_fini: tears down driver state, does not configure H/W.
+ * @hw_init: sets up the H/W state.
+ * @hw_fini: tears down the H/W state.
+ * @halt_engines: halt engines, needed for reset sequence. This also disables
+ * interrupts from the device. Should be called before
+ * hw_fini and before CS rollback.
+ * @suspend: handles IP specific H/W or SW changes for suspend.
+ * @resume: handles IP specific H/W or SW changes for resume.
+ * @cb_mmap: maps a CB.
+ * @ring_doorbell: increment PI on a given QMAN.
+ * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
+ * @dma_alloc_coherent: Allocate coherent DMA memory by calling
+ * dma_alloc_coherent(). This is an ASIC function because its
+ * implementation is not trivial when the driver is loaded
+ * in simulation mode (not upstreamed).
+ * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent().
+ * This is an ASIC function because its implementation is not
+ * trivial when the driver is loaded in simulation mode
+ * (not upstreamed).
+ * @get_int_queue_base: get the internal queue base address.
+ * @test_queues: run simple test on all queues for sanity check.
+ * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
+ * size of allocation is HL_DMA_POOL_BLK_SIZE.
+ * @dma_pool_free: free small DMA allocation from pool.
+ * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
+ * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
+ * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
+ * @cs_parser: parse Command Submission.
+ * @asic_dma_map_sg: DMA map scatter-gather list.
+ * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
+ * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
+ * @update_eq_ci: update event queue CI.
+ * @context_switch: called upon ASID context switch.
+ * @restore_phase_topology: clear all SOBs and MONs.
+ * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
+ * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
+ * @add_device_attr: add ASIC specific device attributes.
+ * @handle_eqe: handle event queue entry (IRQ) from ArmCP.
+ * @set_pll_profile: change PLL profile (manual/automatic).
+ * @get_events_stat: retrieve event queue entries histogram.
+ * @read_pte: read MMU page table entry from DRAM.
+ * @write_pte: write MMU page table entry to DRAM.
+ * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
+ * hard (L0 & L1) flush.
+ * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
+ * ASID-VA-size mask.
+ * @send_heartbeat: send is-alive packet to ArmCP and verify response.
+ * @enable_clock_gating: enable clock gating for reducing power consumption.
+ * @disable_clock_gating: disable clock gating to allow accessing registers on HBW.
+ * @is_device_idle: return true if device is idle, false otherwise.
+ * @soft_reset_late_init: perform certain actions needed after soft reset.
+ * @hw_queues_lock: acquire H/W queues lock.
+ * @hw_queues_unlock: release H/W queues lock.
+ * @get_pci_id: retrieve PCI ID.
+ * @get_eeprom_data: retrieve EEPROM data from F/W.
+ * @send_cpu_message: send buffer to ArmCP.
+ * @get_hw_state: retrieve the H/W state
+ */
+struct hl_asic_funcs {
+ int (*early_init)(struct hl_device *hdev);
+ int (*early_fini)(struct hl_device *hdev);
+ int (*late_init)(struct hl_device *hdev);
+ void (*late_fini)(struct hl_device *hdev);
+ int (*sw_init)(struct hl_device *hdev);
+ int (*sw_fini)(struct hl_device *hdev);
+ int (*hw_init)(struct hl_device *hdev);
+ void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
+ void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
+ int (*suspend)(struct hl_device *hdev);
+ int (*resume)(struct hl_device *hdev);
+ int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ u64 kaddress, phys_addr_t paddress, u32 size);
+ void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
+ void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
+ void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*dma_free_coherent)(struct hl_device *hdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle);
+ void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
+ dma_addr_t *dma_handle, u16 *queue_len);
+ int (*test_queues)(struct hl_device *hdev);
+ void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma_handle);
+ void (*dma_pool_free)(struct hl_device *hdev, void *vaddr,
+ dma_addr_t dma_addr);
+ void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
+ size_t size, dma_addr_t *dma_handle);
+ void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
+ size_t size, void *vaddr);
+ void (*hl_dma_unmap_sg)(struct hl_device *hdev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
+ int (*asic_dma_map_sg)(struct hl_device *hdev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
+ struct sg_table *sgt);
+ void (*add_end_of_cb_packets)(u64 kernel_address, u32 len, u64 cq_addr,
+ u32 cq_val, u32 msix_num);
+ void (*update_eq_ci)(struct hl_device *hdev, u32 val);
+ int (*context_switch)(struct hl_device *hdev, u32 asid);
+ void (*restore_phase_topology)(struct hl_device *hdev);
+ int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
+ int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
+ void (*add_device_attr)(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
+ void (*handle_eqe)(struct hl_device *hdev,
+ struct hl_eq_entry *eq_entry);
+ void (*set_pll_profile)(struct hl_device *hdev,
+ enum hl_pll_frequency freq);
+ void* (*get_events_stat)(struct hl_device *hdev, u32 *size);
+ u64 (*read_pte)(struct hl_device *hdev, u64 addr);
+ void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
+ void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
+ void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
+ u32 asid, u64 va, u64 size);
+ int (*send_heartbeat)(struct hl_device *hdev);
+ void (*enable_clock_gating)(struct hl_device *hdev);
+ void (*disable_clock_gating)(struct hl_device *hdev);
+ bool (*is_device_idle)(struct hl_device *hdev);
+ int (*soft_reset_late_init)(struct hl_device *hdev);
+ void (*hw_queues_lock)(struct hl_device *hdev);
+ void (*hw_queues_unlock)(struct hl_device *hdev);
+ u32 (*get_pci_id)(struct hl_device *hdev);
+ int (*get_eeprom_data)(struct hl_device *hdev, void *data,
+ size_t max_size);
+ int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
+ u16 len, u32 timeout, long *result);
+ enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
+};
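+
+/*
+ * Dispatch sketch (illustrative): common code is expected to reach ASIC
+ * specific behavior only through this ops table, e.g.:
+ *
+ *     rc = hdev->asic_funcs->hw_init(hdev);
+ *     if (rc)
+ *             dev_err(hdev->dev, "H/W init failed\n");
+ */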
+
+
+/*
+ * CONTEXTS
+ */
+
+#define HL_KERNEL_ASID_ID 0
+
+/**
+ * struct hl_va_range - virtual addresses range.
+ * @lock: protects the virtual addresses list.
+ * @list: list of virtual addresses blocks available for mappings.
+ * @start_addr: range start address.
+ * @end_addr: range end address.
+ */
+struct hl_va_range {
+ struct mutex lock;
+ struct list_head list;
+ u64 start_addr;
+ u64 end_addr;
+};
+
+/**
+ * struct hl_ctx - user/kernel context.
+ * @mem_hash: holds mapping from virtual address to virtual memory area
+ * descriptor (hl_vm_phys_pg_pack or hl_userptr).
+ * @mmu_hash: holds a mapping from virtual address to pgt_info structure.
+ * @hpriv: pointer to the private (KMD) data of the process (fd).
+ * @hdev: pointer to the device structure.
+ * @refcount: reference counter for the context. Context is released only when
+ * this hits 0. It is incremented on CS and CS_WAIT.
+ * @cs_pending: array of DMA fence objects representing pending CS.
+ * @host_va_range: holds available virtual addresses for host mappings.
+ * @dram_va_range: holds available virtual addresses for DRAM mappings.
+ * @mem_hash_lock: protects the mem_hash.
+ * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying
+ * the MMU hash or walking the PGT requires taking this lock.
+ * @debugfs_list: node in debugfs list of contexts.
+ * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
+ * to user so user could inquire about CS. It is used as
+ * index to cs_pending array.
+ * @dram_default_hops: array that holds all hops addresses needed for default
+ * DRAM mapping.
+ * @cs_lock: spinlock to protect cs_sequence.
+ * @dram_phys_mem: amount of used physical DRAM memory by this context.
+ * @thread_restore_token: token to prevent multiple threads of the same context
+ * from running the restore phase. Only one thread
+ * should run it.
+ * @thread_restore_wait_token: token to prevent the threads that didn't run
+ * the restore phase from moving to their execution
+ * phase before the restore phase has finished.
+ * @asid: context's unique address space ID in the device's MMU.
+ */
+struct hl_ctx {
+ DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS);
+ struct hl_fpriv *hpriv;
+ struct hl_device *hdev;
+ struct kref refcount;
+ struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
+ struct hl_va_range host_va_range;
+ struct hl_va_range dram_va_range;
+ struct mutex mem_hash_lock;
+ struct mutex mmu_lock;
+ struct list_head debugfs_list;
+ u64 cs_sequence;
+ u64 *dram_default_hops;
+ spinlock_t cs_lock;
+ atomic64_t dram_phys_mem;
+ atomic_t thread_restore_token;
+ u32 thread_restore_wait_token;
+ u32 asid;
+};
+
+/**
+ * struct hl_ctx_mgr - for handling multiple contexts.
+ * @ctx_lock: protects ctx_handles.
+ * @ctx_handles: idr to hold all ctx handles.
+ */
+struct hl_ctx_mgr {
+ struct mutex ctx_lock;
+ struct idr ctx_handles;
+};
+
+
+
+/*
+ * COMMAND SUBMISSIONS
+ */
+
+/**
+ * struct hl_userptr - memory mapping chunk information
+ * @vm_type: type of the VM.
+ * @job_node: linked-list node for hanging the object on the Job's list.
+ * @vec: pointer to the frame vector.
+ * @sgt: pointer to the scatter-gather table that holds the pages.
+ * @dir: for DMA unmapping, the direction must be supplied, so save it.
+ * @debugfs_list: node in debugfs list of command submissions.
+ * @addr: user-space virtual pointer to the start of the memory area.
+ * @size: size of the memory area to pin & map.
+ * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
+ */
+struct hl_userptr {
+ enum vm_type_t vm_type; /* must be first */
+ struct list_head job_node;
+ struct frame_vector *vec;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ struct list_head debugfs_list;
+ u64 addr;
+ u32 size;
+ u8 dma_mapped;
+};
+
+/**
+ * struct hl_cs - command submission.
+ * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
+ * @ctx: the context this CS belongs to.
+ * @job_list: list of the CS's jobs in the various queues.
+ * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
+ * @refcount: reference counter for usage of the CS.
+ * @fence: pointer to the fence object of this CS.
+ * @work_tdr: delayed work node for TDR.
+ * @mirror_node: node in device mirror list of command submissions.
+ * @debugfs_list: node in debugfs list of command submissions.
+ * @sequence: the sequence number of this CS.
+ * @submitted: true if CS was submitted to H/W.
+ * @completed: true if CS was completed by device.
+ * @timedout: true if the CS has timed out.
+ * @tdr_active: true if TDR was activated for this CS (to prevent
+ * double TDR activation).
+ * @aborted: true if CS was aborted due to some device error.
+ */
+struct hl_cs {
+ u8 jobs_in_queue_cnt[HL_MAX_QUEUES];
+ struct hl_ctx *ctx;
+ struct list_head job_list;
+ spinlock_t job_lock;
+ struct kref refcount;
+ struct dma_fence *fence;
+ struct delayed_work work_tdr;
+ struct list_head mirror_node;
+ struct list_head debugfs_list;
+ u64 sequence;
+ u8 submitted;
+ u8 completed;
+ u8 timedout;
+ u8 tdr_active;
+ u8 aborted;
+};
+
+/**
+ * struct hl_cs_job - command submission job.
+ * @cs_node: the node to hang on the CS jobs list.
+ * @cs: the CS this job belongs to.
+ * @user_cb: the CB we got from the user.
+ * @patched_cb: in case of patching, this is internal CB which is submitted on
+ * the queue instead of the CB we got from the IOCTL.
+ * @finish_work: workqueue object to run when job is completed.
+ * @userptr_list: linked-list of userptr mappings that belong to this job and
+ * wait for completion.
+ * @debugfs_list: node in debugfs list of command submission jobs.
+ * @id: the id of this job inside a CS.
+ * @hw_queue_id: the id of the H/W queue this job is submitted to.
+ * @user_cb_size: the actual size of the CB we got from the user.
+ * @job_cb_size: the actual size of the CB that we put on the queue.
+ * @ext_queue: whether the job is for external queue or internal queue.
+ */
+struct hl_cs_job {
+ struct list_head cs_node;
+ struct hl_cs *cs;
+ struct hl_cb *user_cb;
+ struct hl_cb *patched_cb;
+ struct work_struct finish_work;
+ struct list_head userptr_list;
+ struct list_head debugfs_list;
+ u32 id;
+ u32 hw_queue_id;
+ u32 user_cb_size;
+ u32 job_cb_size;
+ u8 ext_queue;
+};
+
+/**
+ * struct hl_cs_parser - command submission parser properties.
+ * @user_cb: the CB we got from the user.
+ * @patched_cb: in case of patching, this is internal CB which is submitted on
+ * the queue instead of the CB we got from the IOCTL.
+ * @job_userptr_list: linked-list of userptr mappings that belong to the related
+ * job and wait for completion.
+ * @cs_sequence: the sequence number of the related CS.
+ * @ctx_id: the ID of the context the related CS belongs to.
+ * @hw_queue_id: the id of the H/W queue this job is submitted to.
+ * @user_cb_size: the actual size of the CB we got from the user.
+ * @patched_cb_size: the size of the CB after parsing.
+ * @ext_queue: whether the job is for external queue or internal queue.
+ * @job_id: the id of the related job inside the related CS.
+ * @use_virt_addr: whether to treat the addresses in the CB as virtual during
+ * parsing.
+ */
+struct hl_cs_parser {
+ struct hl_cb *user_cb;
+ struct hl_cb *patched_cb;
+ struct list_head *job_userptr_list;
+ u64 cs_sequence;
+ u32 ctx_id;
+ u32 hw_queue_id;
+ u32 user_cb_size;
+ u32 patched_cb_size;
+ u8 ext_queue;
+ u8 job_id;
+ u8 use_virt_addr;
+};
+
+
+/*
+ * MEMORY STRUCTURE
+ */
+
+/**
+ * struct hl_vm_hash_node - hash element from virtual address to virtual
+ * memory area descriptor (hl_vm_phys_pg_pack or
+ * hl_userptr).
+ * @node: node to hang on the hash table in context object.
+ * @vaddr: key virtual address.
+ * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr).
+ */
+struct hl_vm_hash_node {
+ struct hlist_node node;
+ u64 vaddr;
+ void *ptr;
+};
+
+/**
+ * struct hl_vm_phys_pg_pack - physical page pack.
+ * @vm_type: describes the type of the virtual area descriptor.
+ * @pages: the physical page array.
+ * @mapping_cnt: number of shared mappings.
+ * @asid: the context related to this list.
+ * @npages: num physical pages in the pack.
+ * @page_size: size of each page in the pack.
+ * @total_size: total size of all the pages in this list.
+ * @flags: HL_MEM_* flags related to this list.
+ * @handle: the provided handle related to this list.
+ * @offset: offset from the first page.
+ * @contiguous: is contiguous physical memory.
+ * @created_from_userptr: is product of host virtual address.
+ */
+struct hl_vm_phys_pg_pack {
+ enum vm_type_t vm_type; /* must be first */
+ u64 *pages;
+ atomic_t mapping_cnt;
+ u32 asid;
+ u32 npages;
+ u32 page_size;
+ u32 total_size;
+ u32 flags;
+ u32 handle;
+ u32 offset;
+ u8 contiguous;
+ u8 created_from_userptr;
+};
+
+/**
+ * struct hl_vm_va_block - virtual range block information.
+ * @node: node to hang on the virtual range list in context object.
+ * @start: virtual range start address.
+ * @end: virtual range end address.
+ * @size: virtual range size.
+ */
+struct hl_vm_va_block {
+ struct list_head node;
+ u64 start;
+ u64 end;
+ u64 size;
+};
+
+/**
+ * struct hl_vm - virtual memory manager for MMU.
+ * @dram_pg_pool: pool for DRAM physical pages of 2MB.
+ * @dram_pg_pool_refcount: reference counter for the pool usage.
+ * @idr_lock: protects phys_pg_pack_handles.
+ * @phys_pg_pack_handles: idr to hold all device allocations handles.
+ * @init_done: whether initialization was done. We need this because VM
+ * initialization might be skipped during device initialization.
+ */
+struct hl_vm {
+ struct gen_pool *dram_pg_pool;
+ struct kref dram_pg_pool_refcount;
+ spinlock_t idr_lock;
+ struct idr phys_pg_pack_handles;
+ u8 init_done;
+};
+
+/*
+ * FILE PRIVATE STRUCTURE
+ */
+
+/**
+ * struct hl_fpriv - process information stored in FD private data.
+ * @hdev: habanalabs device structure.
+ * @filp: pointer to the given file structure.
+ * @taskpid: current process ID.
+ * @ctx: current executing context.
+ * @ctx_mgr: context manager to handle multiple context for this FD.
+ * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
+ * @debugfs_list: list of relevant ASIC debugfs.
+ * @refcount: number of related contexts.
+ * @restore_phase_mutex: lock for context switch and restore phase.
+ */
+struct hl_fpriv {
+ struct hl_device *hdev;
+ struct file *filp;
+ struct pid *taskpid;
+ struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
+ struct hl_ctx_mgr ctx_mgr;
+ struct hl_cb_mgr cb_mgr;
+ struct list_head debugfs_list;
+ struct kref refcount;
+ struct mutex restore_phase_mutex;
+};
+
+
+/*
+ * DebugFS
+ */
+
+/**
+ * struct hl_info_list - debugfs file ops.
+ * @name: file name.
+ * @show: function to output information.
+ * @write: function to write to the file.
+ */
+struct hl_info_list {
+ const char *name;
+ int (*show)(struct seq_file *s, void *data);
+ ssize_t (*write)(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos);
+};
+
+/**
+ * struct hl_debugfs_entry - debugfs dentry wrapper.
+ * @dent: base debugfs entry structure.
+ * @info_ent: dentry related ops.
+ * @dev_entry: ASIC specific debugfs manager.
+ */
+struct hl_debugfs_entry {
+ struct dentry *dent;
+ const struct hl_info_list *info_ent;
+ struct hl_dbg_device_entry *dev_entry;
+};
+
+/**
+ * struct hl_dbg_device_entry - ASIC specific debugfs manager.
+ * @root: root dentry.
+ * @hdev: habanalabs device structure.
+ * @entry_arr: array of available hl_debugfs_entry.
+ * @file_list: list of available debugfs files.
+ * @file_mutex: protects file_list.
+ * @cb_list: list of available CBs.
+ * @cb_spinlock: protects cb_list.
+ * @cs_list: list of available CSs.
+ * @cs_spinlock: protects cs_list.
+ * @cs_job_list: list of available CB jobs.
+ * @cs_job_spinlock: protects cs_job_list.
+ * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
+ * @userptr_spinlock: protects userptr_list.
+ * @ctx_mem_hash_list: list of available contexts with MMU mappings.
+ * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
+ * @addr: next address to read/write from/to in read/write32.
+ * @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @mmu_asid: ASID to use while translating in mmu_show.
+ * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
+ * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
+ * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
+ */
+struct hl_dbg_device_entry {
+ struct dentry *root;
+ struct hl_device *hdev;
+ struct hl_debugfs_entry *entry_arr;
+ struct list_head file_list;
+ struct mutex file_mutex;
+ struct list_head cb_list;
+ spinlock_t cb_spinlock;
+ struct list_head cs_list;
+ spinlock_t cs_spinlock;
+ struct list_head cs_job_list;
+ spinlock_t cs_job_spinlock;
+ struct list_head userptr_list;
+ spinlock_t userptr_spinlock;
+ struct list_head ctx_mem_hash_list;
+ spinlock_t ctx_mem_hash_spinlock;
+ u64 addr;
+ u64 mmu_addr;
+ u32 mmu_asid;
+ u8 i2c_bus;
+ u8 i2c_addr;
+ u8 i2c_reg;
+};
+
+
+/*
+ * DEVICES
+ */
+
+/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
+ * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
+ */
+#define HL_MAX_MINORS 256
+
+/*
+ * Registers read & write functions.
+ */
+
+u32 hl_rreg(struct hl_device *hdev, u32 reg);
+void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
+
+#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+ readl_poll_timeout(hdev->rmmio + addr, val, cond, sleep_us, timeout_us)
+
+#define RREG32(reg) hl_rreg(hdev, (reg))
+#define WREG32(reg, v) hl_wreg(hdev, (reg), (v))
+#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
+ hl_rreg(hdev, (reg)))
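+
+/*
+ * Usage sketch (register name and timing values are illustrative, not taken
+ * from a real register map): RREG32/WREG32 rely on a local 'hdev' variable
+ * being in scope, and hl_poll_timeout() returns 0 or -ETIMEDOUT just like
+ * readl_poll_timeout():
+ *
+ *     u32 status;
+ *     int rc;
+ *
+ *     WREG32(mmSOME_CTRL_REG, 0x1);
+ *     rc = hl_poll_timeout(hdev, mmSOME_STATUS_REG, status, (status == 1),
+ *                          1000, 1000000);
+ */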
+
+#define WREG32_P(reg, val, mask) \
+ do { \
+ u32 tmp_ = RREG32(reg); \
+ tmp_ &= (mask); \
+ tmp_ |= ((val) & ~(mask)); \
+ WREG32(reg, tmp_); \
+ } while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+
+#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
+#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
+#define WREG32_FIELD(reg, field, val) \
+ WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
+ (val) << REG_FIELD_SHIFT(reg, field))
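+
+/*
+ * Read-modify-write sketch (register and field names are hypothetical):
+ *
+ *     WREG32_OR(mmSOME_REG, BIT(3));           // set bit 3, keep the rest
+ *     WREG32_AND(mmSOME_REG, ~BIT(3));         // clear bit 3, keep the rest
+ *     WREG32_FIELD(SOME_REG, SOME_FIELD, 5);   // write 5 into SOME_FIELD
+ */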
+
+struct hwmon_chip_info;
+
+/**
+ * struct hl_device_reset_work - reset workqueue task wrapper.
+ * @reset_work: reset work to be done.
+ * @hdev: habanalabs device structure.
+ */
+struct hl_device_reset_work {
+ struct work_struct reset_work;
+ struct hl_device *hdev;
+};
+
+/**
+ * struct hl_device - habanalabs device structure.
+ * @pdev: pointer to PCI device, can be NULL in case of simulator device.
+ * @pcie_bar: array of available PCIe bars.
+ * @rmmio: configuration area address on SRAM.
+ * @cdev: related char device.
+ * @dev: related kernel basic device structure.
+ * @work_freq: delayed work to lower device frequency if possible.
+ * @work_heartbeat: delayed work for ArmCP is-alive check.
+ * @asic_name: ASIC specific name.
+ * @asic_type: ASIC specific type.
+ * @completion_queue: array of hl_cq.
+ * @cq_wq: work queue of completion queues for executing work in process context
+ * @eq_wq: work queue of event queue for executing work in process context.
+ * @kernel_ctx: KMD context structure.
+ * @kernel_queues: array of hl_hw_queue.
+ * @hw_queues_mirror_list: CS mirror list for TDR.
+ * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
+ * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
+ * @event_queue: event queue for IRQ from ArmCP.
+ * @dma_pool: DMA pool for small allocations.
+ * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
+ * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
+ * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
+ * @asid_bitmap: holds used/available ASIDs.
+ * @asid_mutex: protects asid_bitmap.
+ * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
+ * fd_open_cnt is atomic, we need this lock to serialize
+ * the open function because the driver currently supports
+ * only a single process at a time. In addition, we need a
+ * lock here so we can flush user processes which are opening
+ * the device while we are trying to hard reset it
+ * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
+ * @asic_prop: ASIC specific immutable properties.
+ * @asic_funcs: ASIC specific functions.
+ * @asic_specific: ASIC specific information to use only from ASIC files.
+ * @mmu_pgt_pool: pool of available MMU hops.
+ * @vm: virtual memory manager for MMU.
+ * @mmu_cache_lock: protects MMU cache invalidation as it can serve only one
+ * context at a time.
+ * @hwmon_dev: H/W monitor device.
+ * @pm_mng_profile: current power management profile.
+ * @hl_chip_info: ASIC's sensors information.
+ * @hl_debugfs: device's debugfs manager.
+ * @cb_pool: list of preallocated CBs.
+ * @cb_pool_lock: protects the CB pool.
+ * @user_ctx: current user context executing.
+ * @dram_used_mem: current DRAM memory consumption.
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @timeout_jiffies: device CS timeout value.
+ * @max_power: the max power of the device, as configured by the sysadmin. This
+ * value is saved so in case of hard-reset, KMD will restore this
+ * value and update the F/W after the re-initialization
+ * @major: habanalabs KMD major.
+ * @high_pll: high PLL profile frequency.
+ * @soft_reset_cnt: number of soft reset since KMD loading.
+ * @hard_reset_cnt: number of hard reset since KMD loading.
+ * @id: device minor.
+ * @disabled: is device disabled.
+ * @late_init_done: whether the late init stage was done during initialization.
+ * @hwmon_initialized: whether the H/W monitor sensors were initialized.
+ * @hard_reset_pending: is there a hard reset work pending.
+ * @heartbeat: is heartbeat sanity check towards ArmCP enabled.
+ * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
+ * otherwise.
+ * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
+ * @dram_default_page_mapping: is DRAM default page mapping enabled.
+ * @init_done: is the initialization of the device done.
+ * @mmu_enable: is MMU enabled.
+ * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
+ */
+struct hl_device {
+ struct pci_dev *pdev;
+ void __iomem *pcie_bar[6];
+ void __iomem *rmmio;
+ struct cdev cdev;
+ struct device *dev;
+ struct delayed_work work_freq;
+ struct delayed_work work_heartbeat;
+ char asic_name[16];
+ enum hl_asic_type asic_type;
+ struct hl_cq *completion_queue;
+ struct workqueue_struct *cq_wq;
+ struct workqueue_struct *eq_wq;
+ struct hl_ctx *kernel_ctx;
+ struct hl_hw_queue *kernel_queues;
+ struct list_head hw_queues_mirror_list;
+ spinlock_t hw_queues_mirror_lock;
+ struct hl_cb_mgr kernel_cb_mgr;
+ struct hl_eq event_queue;
+ struct dma_pool *dma_pool;
+ void *cpu_accessible_dma_mem;
+ dma_addr_t cpu_accessible_dma_address;
+ struct gen_pool *cpu_accessible_dma_pool;
+ unsigned long *asid_bitmap;
+ struct mutex asid_mutex;
+ /* TODO: remove fd_open_cnt_lock for multiple process support */
+ struct mutex fd_open_cnt_lock;
+ struct mutex send_cpu_message_lock;
+ struct asic_fixed_properties asic_prop;
+ const struct hl_asic_funcs *asic_funcs;
+ void *asic_specific;
+ struct gen_pool *mmu_pgt_pool;
+ struct hl_vm vm;
+ struct mutex mmu_cache_lock;
+ struct device *hwmon_dev;
+ enum hl_pm_mng_profile pm_mng_profile;
+ struct hwmon_chip_info *hl_chip_info;
+
+ struct hl_dbg_device_entry hl_debugfs;
+
+ struct list_head cb_pool;
+ spinlock_t cb_pool_lock;
+
+ /* TODO: remove user_ctx for multiple process support */
+ struct hl_ctx *user_ctx;
+
+ atomic64_t dram_used_mem;
+ atomic_t in_reset;
+ atomic_t curr_pll_profile;
+ atomic_t fd_open_cnt;
+ u64 timeout_jiffies;
+ u64 max_power;
+ u32 major;
+ u32 high_pll;
+ u32 soft_reset_cnt;
+ u32 hard_reset_cnt;
+ u16 id;
+ u8 disabled;
+ u8 late_init_done;
+ u8 hwmon_initialized;
+ u8 hard_reset_pending;
+ u8 heartbeat;
+ u8 reset_on_lockup;
+ u8 dram_supports_virtual_memory;
+ u8 dram_default_page_mapping;
+ u8 init_done;
+ u8 device_cpu_disabled;
+
+ /* Parameters for bring-up */
+ u8 mmu_enable;
+ u8 cpu_enable;
+ u8 reset_pcilink;
+ u8 cpu_queues_enable;
+ u8 fw_loading;
+ u8 pldm;
+};
+
+
+/*
+ * IOCTLs
+ */
+
+/**
+ * typedef hl_ioctl_t - typedef for ioctl function in the driver
+ * @hpriv: pointer to the FD's private data, which contains state of
+ * user process
+ * @data: pointer to the input/output arguments structure of the IOCTL
+ *
+ * Return: 0 for success, negative value for error
+ */
+typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
+
+/**
+ * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
+ * @cmd: the IOCTL code as created by the kernel macros.
+ * @func: pointer to the driver's function that should be called for this IOCTL.
+ */
+struct hl_ioctl_desc {
+ unsigned int cmd;
+ hl_ioctl_t *func;
+};
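+
+/*
+ * Table sketch (the HL_IOCTL_* codes are assumed to come from the uapi header
+ * and are illustrative here): the driver can keep one descriptor per IOCTL
+ * and dispatch by matching cmd, e.g.:
+ *
+ *     static const struct hl_ioctl_desc hl_ioctls[] = {
+ *             {HL_IOCTL_CB, hl_cb_ioctl},
+ *             {HL_IOCTL_CS, hl_cs_ioctl},
+ *     };
+ */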
+
+
+/*
+ * Kernel module functions that can be accessed by entire module
+ */
+
+/**
+ * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
+ * @address: The start address of the area we want to validate.
+ * @size: The size in bytes of the area we want to validate.
+ * @range_start_address: The start address of the valid range.
+ * @range_end_address: The end address of the valid range.
+ *
+ * Return: true if the area is inside the valid range, false otherwise.
+ */
+static inline bool hl_mem_area_inside_range(u64 address, u32 size,
+ u64 range_start_address, u64 range_end_address)
+{
+ u64 end_address = address + size;
+
+ if ((address >= range_start_address) &&
+ (end_address <= range_end_address) &&
+ (end_address > address))
+ return true;
+
+ return false;
+}
+
+/**
+ * hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
+ * @address: The start address of the area we want to validate.
+ * @size: The size in bytes of the area we want to validate.
+ * @range_start_address: The start address of the valid range.
+ * @range_end_address: The end address of the valid range.
+ *
+ * Return: true if the area overlaps part or all of the valid range,
+ * false otherwise.
+ */
+static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
+ u64 range_start_address, u64 range_end_address)
+{
+ u64 end_address = address + size;
+
+ if ((address >= range_start_address) &&
+ (address < range_end_address))
+ return true;
+
+ if ((end_address >= range_start_address) &&
+ (end_address < range_end_address))
+ return true;
+
+ if ((address < range_start_address) &&
+ (end_address >= range_end_address))
+ return true;
+
+ return false;
+}
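+
+/*
+ * Usage sketch (addresses and sizes are illustrative): a 0x1000 byte area at
+ * 0x2000 lies fully inside the range [0x0, 0x4000], while an area whose tail
+ * spills past the range end only crosses it:
+ *
+ *     hl_mem_area_inside_range(0x2000, 0x1000, 0x0, 0x4000);   // true
+ *     hl_mem_area_crosses_range(0x3800, 0x1000, 0x0, 0x4000);  // true
+ *     hl_mem_area_inside_range(0x3800, 0x1000, 0x0, 0x4000);   // false
+ */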
+
+int hl_device_open(struct inode *inode, struct file *filp);
+bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
+int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
+ enum hl_asic_type asic_type, int minor);
+void destroy_hdev(struct hl_device *hdev);
+int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us,
+ u32 *val);
+int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
+ u32 timeout_us, u32 *val);
+int hl_hw_queues_create(struct hl_device *hdev);
+void hl_hw_queues_destroy(struct hl_device *hdev);
+int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
+ u32 cb_size, u64 cb_ptr);
+int hl_hw_queue_schedule_cs(struct hl_cs *cs);
+u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
+void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
+void hl_int_hw_queue_update_ci(struct hl_cs *cs);
+void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
+
+#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
+#define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1))
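+
+/*
+ * Illustrative example: with HL_QUEUE_LENGTH == 256, hl_pi_2_offset(257) == 1,
+ * i.e. the producer index wraps around the ring without an explicit modulo.
+ */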
+
+int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
+void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
+int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
+void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
+void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
+void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
+irqreturn_t hl_irq_handler_cq(int irq, void *arg);
+irqreturn_t hl_irq_handler_eq(int irq, void *arg);
+u32 hl_cq_inc_ptr(u32 ptr);
+
+int hl_asid_init(struct hl_device *hdev);
+void hl_asid_fini(struct hl_device *hdev);
+unsigned long hl_asid_alloc(struct hl_device *hdev);
+void hl_asid_free(struct hl_device *hdev, unsigned long asid);
+
+int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
+void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
+int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
+void hl_ctx_do_release(struct kref *ref);
+void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
+int hl_ctx_put(struct hl_ctx *ctx);
+struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
+void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
+void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
+
+int hl_device_init(struct hl_device *hdev, struct class *hclass);
+void hl_device_fini(struct hl_device *hdev);
+int hl_device_suspend(struct hl_device *hdev);
+int hl_device_resume(struct hl_device *hdev);
+int hl_device_reset(struct hl_device *hdev, bool hard_reset,
+ bool from_hard_reset_thread);
+void hl_hpriv_get(struct hl_fpriv *hpriv);
+void hl_hpriv_put(struct hl_fpriv *hpriv);
+int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
+
+int hl_build_hwmon_channel_info(struct hl_device *hdev,
+ struct armcp_sensor *sensors_arr);
+
+int hl_sysfs_init(struct hl_device *hdev);
+void hl_sysfs_fini(struct hl_device *hdev);
+
+int hl_hwmon_init(struct hl_device *hdev);
+void hl_hwmon_fini(struct hl_device *hdev);
+
+int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
+ u64 *handle, int ctx_id);
+int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
+int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ u32 handle);
+void hl_cb_put(struct hl_cb *cb);
+void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
+void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
+int hl_cb_pool_init(struct hl_device *hdev);
+int hl_cb_pool_fini(struct hl_device *hdev);
+
+void hl_cs_rollback_all(struct hl_device *hdev);
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
+
+void goya_set_asic_funcs(struct hl_device *hdev);
+
+int hl_vm_ctx_init(struct hl_ctx *ctx);
+void hl_vm_ctx_fini(struct hl_ctx *ctx);
+
+int hl_vm_init(struct hl_device *hdev);
+void hl_vm_fini(struct hl_device *hdev);
+
+int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr *userptr);
+int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_userptr_delete_list(struct hl_device *hdev,
+ struct list_head *userptr_list);
+bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
+ struct list_head *userptr_list,
+ struct hl_userptr **userptr);
+
+int hl_mmu_init(struct hl_device *hdev);
+void hl_mmu_fini(struct hl_device *hdev);
+int hl_mmu_ctx_init(struct hl_ctx *ctx);
+void hl_mmu_ctx_fini(struct hl_ctx *ctx);
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
+void hl_mmu_swap_out(struct hl_ctx *ctx);
+void hl_mmu_swap_in(struct hl_ctx *ctx);
+
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
+long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
+void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
+ long value);
+u64 hl_get_max_power(struct hl_device *hdev);
+void hl_set_max_power(struct hl_device *hdev, u64 value);
+
+#ifdef CONFIG_DEBUG_FS
+
+void hl_debugfs_init(void);
+void hl_debugfs_fini(void);
+void hl_debugfs_add_device(struct hl_device *hdev);
+void hl_debugfs_remove_device(struct hl_device *hdev);
+void hl_debugfs_add_file(struct hl_fpriv *hpriv);
+void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
+void hl_debugfs_add_cb(struct hl_cb *cb);
+void hl_debugfs_remove_cb(struct hl_cb *cb);
+void hl_debugfs_add_cs(struct hl_cs *cs);
+void hl_debugfs_remove_cs(struct hl_cs *cs);
+void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
+void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
+void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_debugfs_remove_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr);
+void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
+void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
+
+#else
+
+static inline void __init hl_debugfs_init(void)
+{
+}
+
+static inline void hl_debugfs_fini(void)
+{
+}
+
+static inline void hl_debugfs_add_device(struct hl_device *hdev)
+{
+}
+
+static inline void hl_debugfs_remove_device(struct hl_device *hdev)
+{
+}
+
+static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
+{
+}
+
+static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
+{
+}
+
+static inline void hl_debugfs_add_cb(struct hl_cb *cb)
+{
+}
+
+static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
+{
+}
+
+static inline void hl_debugfs_add_cs(struct hl_cs *cs)
+{
+}
+
+static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
+{
+}
+
+static inline void hl_debugfs_add_job(struct hl_device *hdev,
+ struct hl_cs_job *job)
+{
+}
+
+static inline void hl_debugfs_remove_job(struct hl_device *hdev,
+ struct hl_cs_job *job)
+{
+}
+
+static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr)
+{
+}
+
+static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr)
+{
+}
+
+static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
+ struct hl_ctx *ctx)
+{
+}
+
+static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
+ struct hl_ctx *ctx)
+{
+}
+
+#endif
+
+/* IOCTLs */
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
+
+#endif /* HABANALABSP_H_ */
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
new file mode 100644
index 000000000000..748601463f11
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team"
+
+#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators"
+
+MODULE_AUTHOR(HL_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(HL_DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+
+static int hl_major;
+static struct class *hl_class;
+static DEFINE_IDR(hl_devs_idr);
+static DEFINE_MUTEX(hl_devs_idr_lock);
+
+static int timeout_locked = 5;
+static int reset_on_lockup = 1;
+
+module_param(timeout_locked, int, 0444);
+MODULE_PARM_DESC(timeout_locked,
+ "Device lockup timeout in seconds (0 = disabled, default 5s)");
+
+module_param(reset_on_lockup, int, 0444);
+MODULE_PARM_DESC(reset_on_lockup,
+ "Do device reset on lockup (0 = no, 1 = yes, default yes)");
+
+#define PCI_VENDOR_ID_HABANALABS 0x1da3
+
+#define PCI_IDS_GOYA 0x0001
+
+static const struct pci_device_id ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+/*
+ * get_asic_type - translate device id to asic type
+ *
+ * @device: id of the PCI device
+ *
+ * Translate the PCI device id to an ASIC type.
+ * In case of an unidentified device, return ASIC_INVALID.
+ */
+static enum hl_asic_type get_asic_type(u16 device)
+{
+ enum hl_asic_type asic_type;
+
+ switch (device) {
+ case PCI_IDS_GOYA:
+ asic_type = ASIC_GOYA;
+ break;
+ default:
+ asic_type = ASIC_INVALID;
+ break;
+ }
+
+ return asic_type;
+}
+
+/*
+ * hl_device_open - open function for habanalabs device
+ *
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ *
+ * Called when a process opens a habanalabs device.
+ */
+int hl_device_open(struct inode *inode, struct file *filp)
+{
+ struct hl_device *hdev;
+ struct hl_fpriv *hpriv;
+ int rc;
+
+ mutex_lock(&hl_devs_idr_lock);
+ hdev = idr_find(&hl_devs_idr, iminor(inode));
+ mutex_unlock(&hl_devs_idr_lock);
+
+ if (!hdev) {
+ pr_err("Couldn't find device %d:%d\n",
+ imajor(inode), iminor(inode));
+ return -ENXIO;
+ }
+
+ mutex_lock(&hdev->fd_open_cnt_lock);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't open %s because it is disabled or in reset\n",
+ dev_name(hdev->dev));
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+ return -EPERM;
+ }
+
+ if (atomic_read(&hdev->fd_open_cnt)) {
+ dev_info_ratelimited(hdev->dev,
+ "Device %s is already attached to application\n",
+ dev_name(hdev->dev));
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+ return -EBUSY;
+ }
+
+ atomic_inc(&hdev->fd_open_cnt);
+
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+
+ hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv) {
+ rc = -ENOMEM;
+ goto close_device;
+ }
+
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ mutex_init(&hpriv->restore_phase_mutex);
+ kref_init(&hpriv->refcount);
+ nonseekable_open(inode, filp);
+
+ hl_cb_mgr_init(&hpriv->cb_mgr);
+ hl_ctx_mgr_init(&hpriv->ctx_mgr);
+
+ rc = hl_ctx_create(hdev, hpriv);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to open FD (CTX fail)\n");
+ goto out_err;
+ }
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
+ /*
+ * Device is IDLE at this point so it is legal to change PLLs. There
+ * is no need to check anything because if the PLL is already HIGH, the
+ * set function will return without doing anything
+ */
+ hl_device_set_frequency(hdev, PLL_HIGH);
+
+ hl_debugfs_add_file(hpriv);
+
+ return 0;
+
+out_err:
+ filp->private_data = NULL;
+ hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+ mutex_destroy(&hpriv->restore_phase_mutex);
+ kfree(hpriv);
+
+close_device:
+ atomic_dec(&hdev->fd_open_cnt);
+ return rc;
+}
+
+/*
+ * create_hdev - create habanalabs device instance
+ *
+ * @dev: will hold the pointer to the new habanalabs device structure
+ * @pdev: pointer to the pci device
+ * @asic_type: in case of a simulator device, the ASIC type to use
+ * @minor: in case of a simulator device, the requested minor; for a real
+ *         device pass -1 so a new minor is allocated
+ *
+ * Allocate memory for habanalabs device and initialize basic fields
+ * Identify the ASIC type
+ * Allocate ID (minor) for the device (only for real devices)
+ */
+int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
+ enum hl_asic_type asic_type, int minor)
+{
+ struct hl_device *hdev;
+ int rc;
+
+ *dev = NULL;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->major = hl_major;
+ hdev->reset_on_lockup = reset_on_lockup;
+
+ /* Parameters for bring-up - set them to defaults */
+ hdev->mmu_enable = 1;
+ hdev->cpu_enable = 1;
+ hdev->reset_pcilink = 0;
+ hdev->cpu_queues_enable = 1;
+ hdev->fw_loading = 1;
+ hdev->pldm = 0;
+ hdev->heartbeat = 1;
+
+ /* If CPU is disabled, no point in loading FW */
+ if (!hdev->cpu_enable)
+ hdev->fw_loading = 0;
+
+ /* If we don't load FW, no need to initialize CPU queues */
+ if (!hdev->fw_loading)
+ hdev->cpu_queues_enable = 0;
+
+ /* If CPU queues not enabled, no way to do heartbeat */
+ if (!hdev->cpu_queues_enable)
+ hdev->heartbeat = 0;
+
+ if (timeout_locked)
+ hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+ hdev->disabled = true;
+ hdev->pdev = pdev; /* can be NULL in case of simulator device */
+
+ if (asic_type == ASIC_AUTO_DETECT) {
+ hdev->asic_type = get_asic_type(pdev->device);
+ if (hdev->asic_type == ASIC_INVALID) {
+ dev_err(&pdev->dev, "Unsupported ASIC\n");
+ rc = -ENODEV;
+ goto free_hdev;
+ }
+ } else {
+ hdev->asic_type = asic_type;
+ }
+
+ mutex_lock(&hl_devs_idr_lock);
+
+ if (minor == -1) {
+ rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
+ GFP_KERNEL);
+ } else {
+ void *old_idr = idr_replace(&hl_devs_idr, hdev, minor);
+
+ if (IS_ERR_VALUE(old_idr)) {
+ rc = PTR_ERR(old_idr);
+ pr_err("Error %d when trying to replace minor %d\n",
+ rc, minor);
+ mutex_unlock(&hl_devs_idr_lock);
+ goto free_hdev;
+ }
+ rc = minor;
+ }
+
+ mutex_unlock(&hl_devs_idr_lock);
+
+ if (rc < 0) {
+ if (rc == -ENOSPC) {
+ pr_err("too many devices in the system\n");
+ rc = -EBUSY;
+ }
+ goto free_hdev;
+ }
+
+ hdev->id = rc;
+
+ *dev = hdev;
+
+ return 0;
+
+free_hdev:
+ kfree(hdev);
+ return rc;
+}
+
+/*
+ * destroy_hdev - destroy habanalabs device instance
+ *
+ * @dev: pointer to the habanalabs device structure
+ *
+ */
+void destroy_hdev(struct hl_device *hdev)
+{
+ /* Remove device from the device list */
+ mutex_lock(&hl_devs_idr_lock);
+ idr_remove(&hl_devs_idr, hdev->id);
+ mutex_unlock(&hl_devs_idr_lock);
+
+ kfree(hdev);
+}
+
+static int hl_pmops_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct hl_device *hdev = pci_get_drvdata(pdev);
+
+ pr_debug("Going to suspend PCI device\n");
+
+ if (!hdev) {
+ pr_err("device pointer is NULL in suspend\n");
+ return 0;
+ }
+
+ return hl_device_suspend(hdev);
+}
+
+static int hl_pmops_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct hl_device *hdev = pci_get_drvdata(pdev);
+
+ pr_debug("Going to resume PCI device\n");
+
+ if (!hdev) {
+ pr_err("device pointer is NULL in resume\n");
+ return 0;
+ }
+
+ return hl_device_resume(hdev);
+}
+
+/*
+ * hl_pci_probe - probe PCI habanalabs devices
+ *
+ * @pdev: pointer to pci device
+ * @id: pointer to pci device id structure
+ *
+ * Standard PCI probe function for habanalabs device.
+ * Create a new habanalabs device and initialize it according to the
+ * device's type
+ */
+static int hl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct hl_device *hdev;
+ int rc;
+
+ dev_info(&pdev->dev, HL_NAME
+ " device found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1);
+ if (rc)
+ return rc;
+
+ pci_set_drvdata(pdev, hdev);
+
+ rc = hl_device_init(hdev, hl_class);
+ if (rc) {
+ dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
+ rc = -ENODEV;
+ goto disable_device;
+ }
+
+ return 0;
+
+disable_device:
+ pci_set_drvdata(pdev, NULL);
+ destroy_hdev(hdev);
+
+ return rc;
+}
+
+/*
+ * hl_pci_remove - remove PCI habanalabs devices
+ *
+ * @pdev: pointer to pci device
+ *
+ * Standard PCI remove function for habanalabs device
+ */
+static void hl_pci_remove(struct pci_dev *pdev)
+{
+ struct hl_device *hdev;
+
+ hdev = pci_get_drvdata(pdev);
+ if (!hdev)
+ return;
+
+ hl_device_fini(hdev);
+ pci_set_drvdata(pdev, NULL);
+
+ destroy_hdev(hdev);
+}
+
+static const struct dev_pm_ops hl_pm_ops = {
+ .suspend = hl_pmops_suspend,
+ .resume = hl_pmops_resume,
+};
+
+static struct pci_driver hl_pci_driver = {
+ .name = HL_NAME,
+ .id_table = ids,
+ .probe = hl_pci_probe,
+ .remove = hl_pci_remove,
+ .driver.pm = &hl_pm_ops,
+};
+
+/*
+ * hl_init - Initialize the habanalabs kernel driver
+ */
+static int __init hl_init(void)
+{
+ int rc;
+ dev_t dev;
+
+ pr_info("loading driver\n");
+
+ rc = alloc_chrdev_region(&dev, 0, HL_MAX_MINORS, HL_NAME);
+ if (rc < 0) {
+ pr_err("unable to get major\n");
+ return rc;
+ }
+
+ hl_major = MAJOR(dev);
+
+ hl_class = class_create(THIS_MODULE, HL_NAME);
+ if (IS_ERR(hl_class)) {
+ pr_err("failed to allocate class\n");
+ rc = PTR_ERR(hl_class);
+ goto remove_major;
+ }
+
+ hl_debugfs_init();
+
+ rc = pci_register_driver(&hl_pci_driver);
+ if (rc) {
+ pr_err("failed to register pci device\n");
+ goto remove_debugfs;
+ }
+
+ pr_debug("driver loaded\n");
+
+ return 0;
+
+remove_debugfs:
+ hl_debugfs_fini();
+ class_destroy(hl_class);
+remove_major:
+ unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
+ return rc;
+}
+
+/*
+ * hl_exit - Release all resources of the habanalabs kernel driver
+ */
+static void __exit hl_exit(void)
+{
+ pci_unregister_driver(&hl_pci_driver);
+
+ /*
+ * Removing debugfs must be after all devices or simulator devices
+ * have been removed, because otherwise we would hit a bug in the
+ * debugfs code due to referencing NULL objects
+ */
+ hl_debugfs_fini();
+
+ class_destroy(hl_class);
+ unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
+
+ idr_destroy(&hl_devs_idr);
+
+ pr_debug("driver removed\n");
+}
+
+module_init(hl_init);
+module_exit(hl_exit);
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
new file mode 100644
index 000000000000..2c2739a3c5ec
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_hw_ip_info hw_ip = {0};
+ u32 size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 sram_kmd_size, dram_kmd_size;
+
+ if ((!size) || (!out))
+ return -EINVAL;
+
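+ /*
+ * The regions below the SRAM/DRAM user base addresses are reserved for
+ * the driver, so they are excluded from the sizes reported to the user.
+ */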
+ sram_kmd_size = (prop->sram_user_base_address -
+ prop->sram_base_address);
+ dram_kmd_size = (prop->dram_user_base_address -
+ prop->dram_base_address);
+
+ hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
+ hw_ip.sram_base_address = prop->sram_user_base_address;
+ hw_ip.dram_base_address = prop->dram_user_base_address;
+ hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
+ hw_ip.sram_size = prop->sram_size - sram_kmd_size;
+ hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+ if (hw_ip.dram_size > 0)
+ hw_ip.dram_enabled = 1;
+ hw_ip.num_of_events = prop->num_of_events;
+ memcpy(hw_ip.armcp_version,
+ prop->armcp_info.armcp_version, VERSION_MAX_LEN);
+ hw_ip.armcp_cpld_version = __le32_to_cpu(prop->armcp_info.cpld_version);
+ hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
+ hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
+ hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
+ hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
+
+ return copy_to_user(out, &hw_ip,
+ min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
+}
+
+static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ u32 size, max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ void *arr;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ arr = hdev->asic_funcs->get_events_stat(hdev, &size);
+
+ return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
+}
+
+static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_dram_usage dram_usage = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 dram_kmd_size;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ dram_kmd_size = (prop->dram_user_base_address -
+ prop->dram_base_address);
+ dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
+ atomic64_read(&hdev->dram_used_mem);
+ dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem);
+
+ return copy_to_user(out, &dram_usage,
+ min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
+}
+
+static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_hw_idle hw_idle = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev);
+
+ return copy_to_user(out, &hw_idle,
+ min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
+}
+
+static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_info_args *args = data;
+ struct hl_device *hdev = hpriv->hdev;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err(hdev->dev,
+ "Device is disabled or in reset. Can't execute INFO IOCTL\n");
+ return -EBUSY;
+ }
+
+ switch (args->op) {
+ case HL_INFO_HW_IP_INFO:
+ rc = hw_ip_info(hdev, args);
+ break;
+
+ case HL_INFO_HW_EVENTS:
+ rc = hw_events_info(hdev, args);
+ break;
+
+ case HL_INFO_DRAM_USAGE:
+ rc = dram_usage_info(hdev, args);
+ break;
+
+ case HL_INFO_HW_IDLE:
+ rc = hw_idle(hdev, args);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid request %d\n", args->op);
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
+#define HL_IOCTL_DEF(ioctl, _func) \
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
+
+static const struct hl_ioctl_desc hl_ioctls[] = {
+ HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl)
+};
+
+#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
+
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ hl_ioctl_t *func;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+ char stack_kdata[128] = {0};
+ char *kdata = NULL;
+ unsigned int usize, asize;
+ int retcode;
+
+ if (hdev->hard_reset_pending) {
+ dev_crit_ratelimited(hdev->dev,
+ "Device HARD reset pending! Please close FD\n");
+ return -ENODEV;
+ }
+
+ if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
+ u32 hl_size;
+
+ ioctl = &hl_ioctls[nr];
+
+ hl_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (hl_size > asize)
+ asize = hl_size;
+
+ cmd = ioctl->cmd;
+ } else {
+ dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
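+
+ /*
+ * usize is the size encoded in the user's ioctl command while asize is
+ * the larger of that and the kernel's definition, so the kernel buffer
+ * is always big enough for the kernel-side structure while only usize
+ * bytes are copied from/to user space.
+ */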
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ dev_dbg(hdev->dev, "no function\n");
+ retcode = -ENOTTY;
+ goto out_err;
+ }
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kzalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto out_err;
+ }
+ }
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg, usize)) {
+ retcode = -EFAULT;
+ goto out_err;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ retcode = func(hpriv, kdata);
+
+ if (cmd & IOC_OUT)
+ if (copy_to_user((void __user *)arg, kdata, usize))
+ retcode = -EFAULT;
+
+out_err:
+ if (retcode)
+ dev_dbg(hdev->dev,
+ "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current), cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+
+ return retcode;
+}
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
new file mode 100644
index 000000000000..67bece26417c
--- /dev/null
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+/*
+ * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
+ *
+ * @ptr: the current pi/ci value
+ * @val: the amount to add
+ *
+ * Add val to ptr. The counter wraps at twice the queue length so that a
+ * full queue can be distinguished from an empty one.
+ */
+inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
+{
+ ptr += val;
+ ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
+ return ptr;
+}
+
+static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
+{
+ int delta = (q->pi - q->ci);
+
+ if (delta >= 0)
+ return (queue_len - delta);
+ else
+ return (abs(delta) - queue_len);
+}
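+
+/*
+ * Worked example (illustrative, assuming HL_QUEUE_LENGTH is 256 so the
+ * pointers wrap at 512): with pi = 300 and ci = 50, delta is 250 and the
+ * queue has 256 - 250 = 6 free slots. If pi has wrapped around to 10 while
+ * ci is still 300, delta is -290 and the queue has 290 - 256 = 34 free
+ * slots. Letting pi/ci run to twice the queue length is what distinguishes
+ * a completely full queue (delta == queue_len) from an empty one (delta == 0).
+ */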
+
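+/*
+ * hl_int_hw_queue_update_ci - update the ci of every internal queue with the
+ * number of jobs the given CS placed on it
+ *
+ * @cs: pointer to the CS whose per-queue job counts are consumed
+ */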
+void hl_int_hw_queue_update_ci(struct hl_cs *cs)
+{
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_hw_queue *q;
+ int i;
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (hdev->disabled)
+ goto out;
+
+ q = &hdev->kernel_queues[0];
+ for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
+ if (q->queue_type == QUEUE_TYPE_INT) {
+ q->ci += cs->jobs_in_queue_cnt[i];
+ q->ci &= ((q->int_queue_len << 1) - 1);
+ }
+ }
+
+out:
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+}
+
+/*
+ * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @q: pointer to habanalabs queue structure
+ * @ctl: BD's control word
+ * @len: BD's length
+ * @ptr: BD's pointer
+ *
+ * This function assumes there is enough space on the queue to submit a new
+ * BD to it. It initializes the next BD and calls the device specific
+ * function to set the pi (and doorbell)
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr)
+{
+ struct hl_bd *bd;
+
+ bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
+ bd += hl_pi_2_offset(q->pi);
+ bd->ctl = __cpu_to_le32(ctl);
+ bd->len = __cpu_to_le32(len);
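+ /* Translate the host bus address into the device's view of host memory */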
+ bd->ptr = __cpu_to_le64(ptr + hdev->asic_prop.host_phys_base_address);
+
+ q->pi = hl_queue_inc_ptr(q->pi);
+ hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
+}
+
+/*
+ * ext_queue_sanity_checks - perform some sanity checks on external queue
+ *
+ * @hdev : pointer to hl_device structure
+ * @q : pointer to hl_hw_queue structure
+ * @num_of_entries : how many entries to check for space
+ * @reserve_cq_entry : whether to reserve an entry in the cq
+ *
+ * H/W queues spinlock should be taken before calling this function
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the h/w queue
+ * - Make sure we have enough space in the completion queue
+ * - Reserve space in the completion queue (needs to be reversed if there
+ * is a failure down the road before the actual submission of work). Only
+ * do this action if reserve_cq_entry is true
+ *
+ */
+static int ext_queue_sanity_checks(struct hl_device *hdev,
+ struct hl_hw_queue *q, int num_of_entries,
+ bool reserve_cq_entry)
+{
+ atomic_t *free_slots =
+ &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+ int free_slots_cnt;
+
+ /* Check we have enough space in the queue */
+ free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
+
+ if (free_slots_cnt < num_of_entries) {
+ dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
+ q->hw_queue_id, num_of_entries);
+ return -EAGAIN;
+ }
+
+ if (reserve_cq_entry) {
+ /*
+ * Check we have enough space in the completion queue.
+ * Subtract num_of_entries from the free-slots counter; if the
+ * result goes negative, the CQ cannot hold the new entries and
+ * we would not get an ack on their completion. In that case,
+ * restore the counter and bail out.
+ */
+ if (atomic_add_negative(num_of_entries * -1, free_slots)) {
+ dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
+ num_of_entries, q->hw_queue_id);
+ atomic_add(num_of_entries, free_slots);
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * int_queue_sanity_checks - perform some sanity checks on internal queue
+ *
+ * @hdev : pointer to hl_device structure
+ * @q : pointer to hl_hw_queue structure
+ * @num_of_entries : how many entries to check for space
+ *
+ * H/W queues spinlock should be taken before calling this function
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the h/w queue
+ *
+ */
+static int int_queue_sanity_checks(struct hl_device *hdev,
+ struct hl_hw_queue *q,
+ int num_of_entries)
+{
+ int free_slots_cnt;
+
+ /* Check we have enough space in the queue */
+ free_slots_cnt = queue_free_slots(q, q->int_queue_len);
+
+ if (free_slots_cnt < num_of_entries) {
+ dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
+ q->hw_queue_id, num_of_entries);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
+ *
+ * @hdev: pointer to hl_device structure
+ * @hw_queue_id: ID of the queue to submit to
+ * @cb_size: size of CB
+ * @cb_ptr: pointer to CB location
+ *
+ * This function sends a single CB that must NOT generate a completion entry
+ *
+ */
+int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
+ u32 cb_size, u64 cb_ptr)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
+ int rc;
+
+ /*
+ * The CPU queue is a synchronous queue with an effective depth of
+ * a single entry (although it is allocated with room for multiple
+ * entries). Therefore, there is a different lock, called
+ * send_cpu_message_lock, that serializes accesses to the CPU queue.
+ * As a result, we don't need to lock the access to the entire H/W
+ * queues module when submitting a JOB to the CPU queue
+ */
+ if (q->queue_type != QUEUE_TYPE_CPU)
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (hdev->disabled) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ rc = ext_queue_sanity_checks(hdev, q, 1, false);
+ if (rc)
+ goto out;
+
+ ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+
+out:
+ if (q->queue_type != QUEUE_TYPE_CPU)
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ return rc;
+}
+
+/*
+ * ext_hw_queue_schedule_job - submit a JOB to an external queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_cq_entry cq_pkt;
+ struct hl_cq *cq;
+ u64 cq_addr;
+ struct hl_cb *cb;
+ u32 ctl;
+ u32 len;
+ u64 ptr;
+
+ /*
+ * Update the JOB ID inside the BD CTL so the device would know what
+ * to write in the completion queue
+ */
+ ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);
+
+ cb = job->patched_cb;
+ len = job->job_cb_size;
+ ptr = cb->bus_address;
+
+ cq_pkt.data = __cpu_to_le32(
+ ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
+ & CQ_ENTRY_SHADOW_INDEX_MASK) |
+ (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
+ (1 << CQ_ENTRY_READY_SHIFT));
+
+ /*
+ * No need to protect pi_offset because scheduling to the
+ * H/W queues is done under the scheduler mutex
+ *
+ * No need to check if CQ is full because it was already
+ * checked in ext_queue_sanity_checks
+ */
+ cq = &hdev->completion_queue[q->hw_queue_id];
+ cq_addr = cq->bus_address +
+ hdev->asic_prop.host_phys_base_address;
+ cq_addr += cq->pi * sizeof(struct hl_cq_entry);
+
+ hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
+ cq_addr,
+ __le32_to_cpu(cq_pkt.data),
+ q->hw_queue_id);
+
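+ /*
+ * Remember which job occupies this PQ slot so the completion flow can
+ * map the shadow index reported by the device back to the job
+ */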
+ q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
+
+ cq->pi = hl_cq_inc_ptr(cq->pi);
+
+ ext_queue_submit_bd(hdev, q, ctl, len, ptr);
+}
+
+/*
+ * int_hw_queue_schedule_job - submit a JOB to an internal queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void int_hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_bd bd;
+ u64 *pi, *pbd = (u64 *) &bd;
+
+ bd.ctl = 0;
+ bd.len = __cpu_to_le32(job->job_cb_size);
+ bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
+
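+ /* Copy the 16-byte BD into the queue slot as two 64-bit writes */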
+ pi = (u64 *) (uintptr_t) (q->kernel_address +
+ ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
+
+ pi[0] = pbd[0];
+ pi[1] = pbd[1];
+
+ q->pi++;
+ q->pi &= ((q->int_queue_len << 1) - 1);
+
+ /* Flush PQ entry write. Relevant only for specific ASICs */
+ hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
+
+ hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
+}
+
+/*
+ * hl_hw_queue_schedule_cs - schedule a command submission
+ *
+ * @cs: pointer to the CS
+ *
+ */
+int hl_hw_queue_schedule_cs(struct hl_cs *cs)
+{
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_cs_job *job, *tmp;
+ struct hl_hw_queue *q;
+ int rc = 0, i, cq_cnt;
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err(hdev->dev,
+ "device is disabled or in reset, CS rejected!\n");
+ rc = -EPERM;
+ goto out;
+ }
+
+ q = &hdev->kernel_queues[0];
+ /* This loop assumes all external queues are consecutive */
+ for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
+ if (q->queue_type == QUEUE_TYPE_EXT) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ rc = ext_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i], true);
+ if (rc)
+ goto unroll_cq_resv;
+ cq_cnt++;
+ }
+ } else if (q->queue_type == QUEUE_TYPE_INT) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ rc = int_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+ }
+ }
+
+ spin_lock(&hdev->hw_queues_mirror_lock);
+ list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
+
+ /* Queue TDR if the CS is the first entry and if timeout is wanted */
+ if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
+ (list_first_entry(&hdev->hw_queues_mirror_list,
+ struct hl_cs, mirror_node) == cs)) {
+ cs->tdr_active = true;
+ schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ } else {
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ }
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+ if (job->ext_queue)
+ ext_hw_queue_schedule_job(job);
+ else
+ int_hw_queue_schedule_job(job);
+ }
+
+ cs->submitted = true;
+
+ goto out;
+
+unroll_cq_resv:
+ /* This loop assumes all external queues are consecutive */
+ q = &hdev->kernel_queues[0];
+ for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
+ if ((q->queue_type == QUEUE_TYPE_EXT) &&
+ (cs->jobs_in_queue_cnt[i])) {
+ atomic_t *free_slots =
+ &hdev->completion_queue[i].free_slots_cnt;
+ atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
+ cq_cnt--;
+ }
+ }
+
+out:
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ return rc;
+}
+
+/*
+ * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
+ *
+ * @hdev: pointer to hl_device structure
+ * @hw_queue_id: which queue's ci to increment
+ */
+void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
+
+ q->ci = hl_queue_inc_ptr(q->ci);
+}
+
+static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
+ struct hl_hw_queue *q)
+{
+ void *p;
+ int rc;
+
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->kernel_address = (u64) (uintptr_t) p;
+
+ q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
+ sizeof(*q->shadow_queue),
+ GFP_KERNEL);
+ if (!q->shadow_queue) {
+ dev_err(hdev->dev,
+ "Failed to allocate shadow queue for H/W queue %d\n",
+ q->hw_queue_id);
+ rc = -ENOMEM;
+ goto free_queue;
+ }
+
+ /* Make sure read/write pointers are initialized to start of queue */
+ q->ci = 0;
+ q->pi = 0;
+
+ return 0;
+
+free_queue:
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+
+ return rc;
+}
+
+static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ void *p;
+
+ p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
+ &q->bus_address, &q->int_queue_len);
+ if (!p) {
+ dev_err(hdev->dev,
+ "Failed to get base address for internal queue %d\n",
+ q->hw_queue_id);
+ return -EFAULT;
+ }
+
+ q->kernel_address = (u64) (uintptr_t) p;
+ q->pi = 0;
+ q->ci = 0;
+
+ return 0;
+}
+
+static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_hw_queue_init(hdev, q);
+}
+
+static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_hw_queue_init(hdev, q);
+}
+
+/*
+ * hw_queue_init - main initialization function for H/W queue object
+ *
+ * @hdev: pointer to hl_device device structure
+ * @q: pointer to hl_hw_queue queue structure
+ * @hw_queue_id: The id of the H/W queue
+ *
+ * Allocate dma-able memory for the queue and initialize fields
+ * Returns 0 on success
+ */
+static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 hw_queue_id)
+{
+ int rc;
+
+ BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
+
+ q->hw_queue_id = hw_queue_id;
+
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
+ rc = ext_hw_queue_init(hdev, q);
+ break;
+
+ case QUEUE_TYPE_INT:
+ rc = int_hw_queue_init(hdev, q);
+ break;
+
+ case QUEUE_TYPE_CPU:
+ rc = cpu_hw_queue_init(hdev, q);
+ break;
+
+ case QUEUE_TYPE_NA:
+ q->valid = 0;
+ return 0;
+
+ default:
+ dev_crit(hdev->dev, "wrong queue type %d during init\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ return rc;
+
+ q->valid = 1;
+
+ return 0;
+}
+
+/*
+ * hw_queue_fini - destroy queue
+ *
+ * @hdev: pointer to hl_device device structure
+ * @q: pointer to hl_hw_queue queue structure
+ *
+ * Free the queue memory
+ */
+static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ if (!q->valid)
+ return;
+
+ /*
+ * If we arrived here, there are no jobs waiting on this queue
+ * so we can safely remove it.
+ * This is because this function can only be called when:
+ * 1. Either a context is deleted, which only can occur if all its
+ * jobs were finished
+ * 2. A context wasn't able to be created due to failure or timeout,
+ * which means there are no jobs on the queue yet
+ *
+ * The only exceptions are the queues of the kernel context, but
+ * if they are being destroyed, it means that the entire module is
+ * being removed. If the module is removed, it means there is no open
+ * user context. It also means that if a job was submitted by
+ * the kernel driver (e.g. context creation), the job itself was
+ * released by the kernel driver when a timeout occurred on its
+ * completion. Thus, we don't need to release it again.
+ */
+
+ if (q->queue_type == QUEUE_TYPE_INT)
+ return;
+
+ kfree(q->shadow_queue);
+
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+}
+
+int hl_hw_queues_create(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *asic = &hdev->asic_prop;
+ struct hl_hw_queue *q;
+ int i, rc, q_ready_cnt;
+
+ hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
+ sizeof(*hdev->kernel_queues), GFP_KERNEL);
+
+ if (!hdev->kernel_queues) {
+ dev_err(hdev->dev, "Not enough memory for H/W queues\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize the H/W queues */
+ for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
+ i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
+
+ q->queue_type = asic->hw_queues_props[i].type;
+ rc = hw_queue_init(hdev, q, i);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to initialize queue %d\n", i);
+ goto release_queues;
+ }
+ }
+
+ return 0;
+
+release_queues:
+ for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
+ hw_queue_fini(hdev, q);
+
+ kfree(hdev->kernel_queues);
+
+ return rc;
+}
+
+void hl_hw_queues_destroy(struct hl_device *hdev)
+{
+ struct hl_hw_queue *q;
+ int i;
+
+ for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
+ hw_queue_fini(hdev, q);
+
+ kfree(hdev->kernel_queues);
+}
+
+void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
+{
+ struct hl_hw_queue *q;
+ int i;
+
+ for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
+ if ((!q->valid) ||
+ ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
+ continue;
+ q->pi = q->ci = 0;
+ }
+}
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
new file mode 100644
index 000000000000..77facd25c4a2
--- /dev/null
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+
+#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
+#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
+
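+/*
+ * hl_build_hwmon_channel_info - convert the sensor list reported by ArmCP
+ * into the NULL-terminated hwmon_channel_info array the hwmon core expects.
+ *
+ * Illustrative example (not taken from the original sources): if ArmCP
+ * reports two temperature sensors and one fan, the resulting array has two
+ * entries - one of type hwmon_temp whose config holds the two temperature
+ * flag words followed by a 0 terminator, and one of type hwmon_fan with the
+ * single fan flag word and a 0 terminator - followed by a NULL entry.
+ */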
+int hl_build_hwmon_channel_info(struct hl_device *hdev,
+ struct armcp_sensor *sensors_arr)
+{
+ u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
+ u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
+ u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0};
+ struct hwmon_channel_info **channels_info;
+ u32 num_sensors_for_type, num_active_sensor_types = 0,
+ arr_size = 0, *curr_arr;
+ enum hwmon_sensor_types type;
+ int rc, i, j;
+
+ for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) {
+ type = __le32_to_cpu(sensors_arr[i].type);
+
+ if ((type == 0) && (sensors_arr[i].flags == 0))
+ break;
+
+ if (type >= HWMON_NR_SENSOR_TYPES) {
+ dev_err(hdev->dev,
+ "Got wrong sensor type %d from device\n", type);
+ return -EINVAL;
+ }
+
+ counts[type]++;
+ arr_size++;
+ }
+
+ for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
+ if (counts[i] == 0)
+ continue;
+
+ num_sensors_for_type = counts[i] + 1;
+ curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr),
+ GFP_KERNEL);
+ if (!curr_arr) {
+ rc = -ENOMEM;
+ goto sensors_type_err;
+ }
+
+ num_active_sensor_types++;
+ sensors_by_type[i] = curr_arr;
+ }
+
+ for (i = 0 ; i < arr_size ; i++) {
+ type = __le32_to_cpu(sensors_arr[i].type);
+ curr_arr = sensors_by_type[type];
+ curr_arr[sensors_by_type_next_index[type]++] =
+ __le32_to_cpu(sensors_arr[i].flags);
+ }
+
+ channels_info = kcalloc(num_active_sensor_types + 1,
+ sizeof(*channels_info), GFP_KERNEL);
+ if (!channels_info) {
+ rc = -ENOMEM;
+ goto channels_info_array_err;
+ }
+
+ for (i = 0 ; i < num_active_sensor_types ; i++) {
+ channels_info[i] = kzalloc(sizeof(*channels_info[i]),
+ GFP_KERNEL);
+ if (!channels_info[i]) {
+ rc = -ENOMEM;
+ goto channel_info_err;
+ }
+ }
+
+ for (i = 0, j = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
+ if (!sensors_by_type[i])
+ continue;
+
+ channels_info[j]->type = i;
+ channels_info[j]->config = sensors_by_type[i];
+ j++;
+ }
+
+ hdev->hl_chip_info->info =
+ (const struct hwmon_channel_info **)channels_info;
+
+ return 0;
+
+channel_info_err:
+ for (i = 0 ; i < num_active_sensor_types ; i++)
+ if (channels_info[i]) {
+ kfree(channels_info[i]->config);
+ kfree(channels_info[i]);
+ }
+ kfree(channels_info);
+channels_info_array_err:
+sensors_type_err:
+ for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++)
+ kfree(sensors_by_type[i]);
+
+ return rc;
+}
+
+static int hl_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_crit_hyst:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = hl_get_temperature(hdev, channel, attr);
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_min:
+ case hwmon_in_max:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = hl_get_voltage(hdev, channel, attr);
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_min:
+ case hwmon_curr_max:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = hl_get_current(hdev, channel, attr);
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val = hl_get_fan_speed(hdev, channel, attr);
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val = hl_get_pwm_info(hdev, channel, attr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hl_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_pwm_info(hdev, channel, attr, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ return 0444;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return 0444;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_min:
+ case hwmon_curr_max:
+ return 0444;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ return 0444;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_ops hl_hwmon_ops = {
+ .is_visible = hl_is_visible,
+ .read = hl_read,
+ .write = hl_write
+};
+
+long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get temperature from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get voltage from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get current from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get fan speed from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get pwm info from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
+ long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set pwm info to sensor %d, error %d\n",
+ sensor_index, rc);
+}
+
+int hl_hwmon_init(struct hl_device *hdev)
+{
+ struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
+ int rc;
+
+ if ((hdev->hwmon_initialized) || !(hdev->fw_loading))
+ return 0;
+
+ if (hdev->hl_chip_info->info) {
+ hdev->hl_chip_info->ops = &hl_hwmon_ops;
+
+ hdev->hwmon_dev = hwmon_device_register_with_info(dev,
+ "habanalabs", hdev, hdev->hl_chip_info, NULL);
+ if (IS_ERR(hdev->hwmon_dev)) {
+ rc = PTR_ERR(hdev->hwmon_dev);
+ dev_err(hdev->dev,
+ "Unable to register hwmon device: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(hdev->dev, "%s: add sensors information\n",
+ dev_name(hdev->hwmon_dev));
+
+ hdev->hwmon_initialized = true;
+ } else {
+ dev_info(hdev->dev, "no available sensors\n");
+ }
+
+ return 0;
+}
+
+void hl_hwmon_fini(struct hl_device *hdev)
+{
+ if (!hdev->hwmon_initialized)
+ return;
+
+ hwmon_device_unregister(hdev->hwmon_dev);
+}
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
new file mode 100644
index 000000000000..9dddb917e72c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ARMCP_IF_H
+#define ARMCP_IF_H
+
+#include <linux/types.h>
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct hl_eq_entry {
+ struct hl_eq_header hdr;
+ __le64 data[7];
+};
+
+#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT 31
+#define EQ_CTL_READY_MASK 0x80000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT 16
+#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
+
+#define EVENT_QUEUE_MSIX_IDX 5
+
+enum pq_init_status {
+ PQ_INIT_STATUS_NA = 0,
+ PQ_INIT_STATUS_READY_FOR_CP,
+ PQ_INIT_STATUS_READY_FOR_HOST
+};
+
+/*
+ * ArmCP Primary Queue Packets
+ *
+ * During normal operation, KMD needs to send various messages to ArmCP,
+ * usually either to SET some value into a H/W peripheral or to GET the current
+ * value of some H/W peripheral. For example, SET the frequency of MME/TPC and
+ * GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by KMD
+ * itself, e.g. power management code. In either case, the communication from
+ * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will
+ * send a single message and poll until the message was acknowledged and the
+ * results are ready (if results are needed).
+ *
+ * This means that only a single message can be sent at a time and KMD must
+ * wait for its result before sending the next message. Having said that,
+ * because these are control messages which are sent at a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by KMD. During
+ * the device initialization phase, KMD will pass that address to ArmCP. Most of
+ * the message types will contain inputs/outputs inside the message itself.
+ * The common part of each message will contain the opcode of the message (its
+ * type) and a field representing a fence object.
+ *
+ * When KMD wishes to send a message to ArmCP, it will write the message
+ * contents to the device DDR, clear the fence object and then write the
+ * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue
+ * the 484 interrupt-id to the ARM core.
+ *
+ * Upon receiving the 484 interrupt-id, ArmCP will read the message from the
+ * DDR. In case the message is a SET operation, ArmCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, ArmCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, ArmCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, KMD will poll on the fence object. Once KMD sees that the
+ * fence object is signaled, it will read the results from the device DDR
+ * (if relevant) and resume the code execution in KMD.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
+ * so the value being put by the KMD matches the value read by ArmCP
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
+ *
+ * Detailed description:
+ *
+ * ARMCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ *       transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * ARMCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ *       This packet is usually sent after the device is moved to D0 state.
+ *
+ * ARMCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * ARMCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * ARMCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * ARMCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * ARMCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * ARMCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * ARMCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * ARMCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. KMD passes the max size it allows the ArmCP to write to
+ * the structure, to prevent data corruption in case of mismatched
+ * KMD/FW versions.
+ *
+ * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * ARMCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQs numbers are specified in an array right
+ * after the armcp_packet structure, where its first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * ARMCP_PACKET_TEST -
+ * Test packet for ArmCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * ARMCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * ARMCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * ARMCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * ARMCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the ArmCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, KMD passes the max size it allows the ArmCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched KMD/FW versions.
+ *
+ */
+
+enum armcp_packet_id {
+ ARMCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ ARMCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ ARMCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ ARMCP_PACKET_VOLTAGE_GET, /* sysfs */
+ ARMCP_PACKET_CURRENT_GET, /* sysfs */
+ ARMCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ ARMCP_PACKET_PWM_GET, /* sysfs */
+ ARMCP_PACKET_PWM_SET, /* sysfs */
+ ARMCP_PACKET_FREQUENCY_SET, /* sysfs */
+ ARMCP_PACKET_FREQUENCY_GET, /* sysfs */
+ ARMCP_PACKET_LED_SET, /* debugfs */
+ ARMCP_PACKET_I2C_WR, /* debugfs */
+ ARMCP_PACKET_I2C_RD, /* debugfs */
+ ARMCP_PACKET_INFO_GET, /* IOCTL */
+ ARMCP_PACKET_FLASH_PROGRAM_REMOVED,
+ ARMCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ ARMCP_PACKET_TEST, /* internal */
+ ARMCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ ARMCP_PACKET_MAX_POWER_GET, /* sysfs */
+ ARMCP_PACKET_MAX_POWER_SET, /* sysfs */
+ ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+};
+
+#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
+
+#define ARMCP_PKT_CTL_RC_SHIFT 12
+#define ARMCP_PKT_CTL_RC_MASK 0x0000F000
+
+#define ARMCP_PKT_CTL_OPCODE_SHIFT 16
+#define ARMCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
+
+struct armcp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to KMD that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+ __u8 pad; /* unused */
+ };
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get Armcp info/EEPROM data */
+ __le32 data_max_size;
+ };
+};
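+
+/*
+ * Example of the synchronous flow described above - an illustrative sketch
+ * mirroring hl_get_temperature() in hwmon.c, where send_cpu_message() is the
+ * KMD helper that writes the packet to the device, triggers the interrupt
+ * and polls the fence (attr is one of the armcp_*_attributes values, e.g.
+ * armcp_temp_input):
+ *
+ *	struct armcp_packet pkt;
+ *	long result;
+ *	int rc;
+ *
+ *	memset(&pkt, 0, sizeof(pkt));
+ *	pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
+ *				ARMCP_PKT_CTL_OPCODE_SHIFT);
+ *	pkt.sensor_index = __cpu_to_le16(sensor_index);
+ *	pkt.type = __cpu_to_le16(attr);
+ *	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
+ *				sizeof(pkt), timeout, &result);
+ */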
+
+struct armcp_unmask_irq_arr_packet {
+ struct armcp_packet armcp_pkt;
+ __le32 length;
+ __le32 irqs[0];
+};
+
+enum armcp_packet_rc {
+ armcp_packet_success,
+ armcp_packet_invalid,
+ armcp_packet_fault
+};
+
+enum armcp_temp_type {
+ armcp_temp_input,
+ armcp_temp_max = 6,
+ armcp_temp_max_hyst,
+ armcp_temp_crit,
+ armcp_temp_crit_hyst
+};
+
+enum armcp_in_attributes {
+ armcp_in_input,
+ armcp_in_min,
+ armcp_in_max
+};
+
+enum armcp_curr_attributes {
+ armcp_curr_input,
+ armcp_curr_min,
+ armcp_curr_max
+};
+
+enum armcp_fan_attributes {
+ armcp_fan_input,
+ armcp_fan_min = 2,
+ armcp_fan_max
+};
+
+enum armcp_pwm_attributes {
+ armcp_pwm_input,
+ armcp_pwm_enable
+};
+
+/* Event Queue Packets */
+
+struct eq_generic_event {
+ __le64 data[7];
+};
+
+/*
+ * ArmCP info
+ */
+
+#define VERSION_MAX_LEN 128
+#define ARMCP_MAX_SENSORS 128
+
+struct armcp_sensor {
+ __le32 type;
+ __le32 flags;
+};
+
+struct armcp_info {
+ struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
+ __u8 kernel_version[VERSION_MAX_LEN];
+ __le32 reserved[3];
+ __le32 cpld_version;
+ __le32 infineon_version;
+ __u8 fuse_version[VERSION_MAX_LEN];
+ __u8 thermal_version[VERSION_MAX_LEN];
+ __u8 armcp_version[VERSION_MAX_LEN];
+ __le64 dram_size;
+};
+
+#endif /* ARMCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
new file mode 100644
index 000000000000..2cf5c46b6e8e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_CA53_CFG_MASKS_H_
+#define ASIC_REG_CPU_CA53_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * CPU_CA53_CFG (Prototype: CA53_CFG)
+ *****************************************
+ */
+
+/* CPU_CA53_CFG_ARM_CFG */
+#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_SHIFT 0
+#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_MASK 0x3
+#define CPU_CA53_CFG_ARM_CFG_END_SHIFT 4
+#define CPU_CA53_CFG_ARM_CFG_END_MASK 0x30
+#define CPU_CA53_CFG_ARM_CFG_TE_SHIFT 8
+#define CPU_CA53_CFG_ARM_CFG_TE_MASK 0x300
+#define CPU_CA53_CFG_ARM_CFG_VINITHI_SHIFT 12
+#define CPU_CA53_CFG_ARM_CFG_VINITHI_MASK 0x3000
+
+/* CPU_CA53_CFG_RST_ADDR_LSB */
+#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_SHIFT 0
+#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_MASK 0xFFFFFFFF
+
+/* CPU_CA53_CFG_RST_ADDR_MSB */
+#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_SHIFT 0
+#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_MASK 0xFF
+
+/* CPU_CA53_CFG_ARM_RST_CONTROL */
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000
+
+/* CPU_CA53_CFG_ARM_AFFINITY */
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_SHIFT 0
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_MASK 0xFF
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_SHIFT 8
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_MASK 0xFF00
+
+/* CPU_CA53_CFG_ARM_DISABLE */
+#define CPU_CA53_CFG_ARM_DISABLE_CP15S_SHIFT 0
+#define CPU_CA53_CFG_ARM_DISABLE_CP15S_MASK 0x3
+#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_SHIFT 4
+#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_MASK 0x30
+#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_SHIFT 8
+#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_MASK 0x100
+#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_SHIFT 9
+#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_MASK 0x200
+
+/* CPU_CA53_CFG_ARM_GIC_PERIPHBASE */
+#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_SHIFT 0
+#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_MASK 0x3FFFFF
+
+/* CPU_CA53_CFG_ARM_GIC_IRQ_CFG */
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_SHIFT 0
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_MASK 0x3
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_SHIFT 4
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_MASK 0x30
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_SHIFT 8
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_MASK 0x300
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_SHIFT 12
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_MASK 0x3000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_SHIFT 16
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_MASK 0x30000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_MASK 0x300000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_SHIFT 24
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_MASK 0x3000000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_SHIFT 31
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_MASK 0x80000000
+
+/* CPU_CA53_CFG_ARM_PWR_MNG */
+#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_SHIFT 0
+#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_MASK 0x1
+#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_SHIFT 1
+#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_MASK 0x2
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_SHIFT 2
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_MASK 0x4
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_SHIFT 3
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_MASK 0x8
+#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_SHIFT 4
+#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_MASK 0x30
+#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_SHIFT 8
+#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_MASK 0x300
+#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_SHIFT 12
+#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_MASK 0x3000
+
+/* CPU_CA53_CFG_ARB_DBG_ROM_ADDR */
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_SHIFT 0
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_MASK 0xFFFFFFF
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_SHIFT 31
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_MASK 0x80000000
+
+/* CPU_CA53_CFG_ARM_DBG_MODES */
+#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_SHIFT 0
+#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_MASK 0x3
+#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_SHIFT 4
+#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_MASK 0x30
+#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_SHIFT 8
+#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_MASK 0x300
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_SHIFT 12
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_MASK 0x3000
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_SHIFT 16
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_MASK 0x30000
+
+/* CPU_CA53_CFG_ARM_PWR_STAT_0 */
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_SHIFT 0
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_MASK 0x1
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_SHIFT 1
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_MASK 0x2
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_SHIFT 4
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_MASK 0x30
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_SHIFT 8
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_MASK 0x300
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_SHIFT 12
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_MASK 0x1000
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_SHIFT 13
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_MASK 0x2000
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_SHIFT 16
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_MASK 0x30000
+
+/* CPU_CA53_CFG_ARM_PWR_STAT_1 */
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_SHIFT 0
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_MASK 0x3
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_SHIFT 4
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_MASK 0x30
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_SHIFT 8
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_MASK 0x300
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_SHIFT 12
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_MASK 0x3000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_SHIFT 16
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_MASK 0x30000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_SHIFT 20
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_MASK 0x300000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_SHIFT 24
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_MASK 0x1000000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_SHIFT 25
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_MASK 0x2000000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_SHIFT 26
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_MASK 0x4000000
+
+/* CPU_CA53_CFG_ARM_DBG_STATUS */
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_SHIFT 0
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_MASK 0x3
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_SHIFT 4
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_MASK 0x30
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_SHIFT 8
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_MASK 0x300
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_SHIFT 12
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_MASK 0x3000
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_SHIFT 16
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_MASK 0x30000
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_MASK 0x300000
+
+/* CPU_CA53_CFG_ARM_MEM_ATTR */
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_SHIFT 0
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_MASK 0xFF
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_SHIFT 8
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_MASK 0xFF00
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_SHIFT 16
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_MASK 0x10000
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_SHIFT 20
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_MASK 0x100000
+
+/* CPU_CA53_CFG_ARM_PMU */
+#define CPU_CA53_CFG_ARM_PMU_EVENT_SHIFT 0
+#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF
+
+#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */
+
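
A short standalone sketch (not part of the patch) showing how the <FIELD>_SHIFT/<FIELD>_MASK pairs in the header above are meant to be used to decode a raw CPU_CA53_CFG_ARM_CFG value; the sample register value is made up and the two macros are mirrored here only so the snippet compiles on its own.

/* ca53_cfg_decode_sketch.c - illustrative only */
#include <stdint.h>
#include <stdio.h>

/* Mirrored from cpu_ca53_cfg_masks.h above */
#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_SHIFT	0
#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_MASK	0x3
#define CPU_CA53_CFG_ARM_CFG_END_SHIFT		4
#define CPU_CA53_CFG_ARM_CFG_END_MASK		0x30

int main(void)
{
	uint32_t arm_cfg = 0x00000013;	/* hypothetical register readback */

	/* Each field is isolated with its mask, then shifted down */
	uint32_t aa64naa32 = (arm_cfg & CPU_CA53_CFG_ARM_CFG_AA64NAA32_MASK) >>
			     CPU_CA53_CFG_ARM_CFG_AA64NAA32_SHIFT;
	uint32_t endianness = (arm_cfg & CPU_CA53_CFG_ARM_CFG_END_MASK) >>
			      CPU_CA53_CFG_ARM_CFG_END_SHIFT;

	printf("AA64nAA32=%u END=%u\n", aa64naa32, endianness);
	return 0;
}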
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
new file mode 100644
index 000000000000..840ccffa1081
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_CA53_CFG_REGS_H_
+#define ASIC_REG_CPU_CA53_CFG_REGS_H_
+
+/*
+ *****************************************
+ * CPU_CA53_CFG (Prototype: CA53_CFG)
+ *****************************************
+ */
+
+#define mmCPU_CA53_CFG_ARM_CFG 0x441100
+
+#define mmCPU_CA53_CFG_RST_ADDR_LSB_0 0x441104
+
+#define mmCPU_CA53_CFG_RST_ADDR_LSB_1 0x441108
+
+#define mmCPU_CA53_CFG_RST_ADDR_MSB_0 0x441114
+
+#define mmCPU_CA53_CFG_RST_ADDR_MSB_1 0x441118
+
+#define mmCPU_CA53_CFG_ARM_RST_CONTROL 0x441124
+
+#define mmCPU_CA53_CFG_ARM_AFFINITY 0x441128
+
+#define mmCPU_CA53_CFG_ARM_DISABLE 0x44112C
+
+#define mmCPU_CA53_CFG_ARM_GIC_PERIPHBASE 0x441130
+
+#define mmCPU_CA53_CFG_ARM_GIC_IRQ_CFG 0x441134
+
+#define mmCPU_CA53_CFG_ARM_PWR_MNG 0x441138
+
+#define mmCPU_CA53_CFG_ARB_DBG_ROM_ADDR 0x44113C
+
+#define mmCPU_CA53_CFG_ARM_DBG_MODES 0x441140
+
+#define mmCPU_CA53_CFG_ARM_PWR_STAT_0 0x441200
+
+#define mmCPU_CA53_CFG_ARM_PWR_STAT_1 0x441204
+
+#define mmCPU_CA53_CFG_ARM_DBG_STATUS 0x441208
+
+#define mmCPU_CA53_CFG_ARM_MEM_ATTR 0x44120C
+
+#define mmCPU_CA53_CFG_ARM_PMU_0 0x441210
+
+#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214
+
+#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
new file mode 100644
index 000000000000..f23cb3e41c30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_IF_REGS_H_
+#define ASIC_REG_CPU_IF_REGS_H_
+
+/*
+ *****************************************
+ * CPU_IF (Prototype: CPU_IF)
+ *****************************************
+ */
+
+#define mmCPU_IF_PF_PQ_PI 0x442100
+
+#define mmCPU_IF_ARUSER_OVR 0x442104
+
+#define mmCPU_IF_ARUSER_OVR_EN 0x442108
+
+#define mmCPU_IF_AWUSER_OVR 0x44210C
+
+#define mmCPU_IF_AWUSER_OVR_EN 0x442110
+
+#define mmCPU_IF_AXCACHE_OVR 0x442114
+
+#define mmCPU_IF_LOCK_OVR 0x442118
+
+#define mmCPU_IF_PROT_OVR 0x44211C
+
+#define mmCPU_IF_MAX_OUTSTANDING 0x442120
+
+#define mmCPU_IF_EARLY_BRESP_EN 0x442124
+
+#define mmCPU_IF_FORCE_RSP_OK 0x442128
+
+#define mmCPU_IF_CPU_MSB_ADDR 0x44212C
+
+#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
+
+#endif /* ASIC_REG_CPU_IF_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
new file mode 100644
index 000000000000..8fc97f838ada
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_PLL_REGS_H_
+#define ASIC_REG_CPU_PLL_REGS_H_
+
+/*
+ *****************************************
+ * CPU_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmCPU_PLL_NR 0x4A2100
+
+#define mmCPU_PLL_NF 0x4A2104
+
+#define mmCPU_PLL_OD 0x4A2108
+
+#define mmCPU_PLL_NB 0x4A210C
+
+#define mmCPU_PLL_CFG 0x4A2110
+
+#define mmCPU_PLL_LOSE_MASK 0x4A2120
+
+#define mmCPU_PLL_LOCK_INTR 0x4A2128
+
+#define mmCPU_PLL_LOCK_BYPASS 0x4A212C
+
+#define mmCPU_PLL_DATA_CHNG 0x4A2130
+
+#define mmCPU_PLL_RST 0x4A2134
+
+#define mmCPU_PLL_SLIP_WD_CNTR 0x4A2150
+
+#define mmCPU_PLL_DIV_FACTOR_0 0x4A2200
+
+#define mmCPU_PLL_DIV_FACTOR_1 0x4A2204
+
+#define mmCPU_PLL_DIV_FACTOR_2 0x4A2208
+
+#define mmCPU_PLL_DIV_FACTOR_3 0x4A220C
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_0 0x4A2220
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_1 0x4A2224
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_2 0x4A2228
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_3 0x4A222C
+
+#define mmCPU_PLL_DIV_SEL_0 0x4A2280
+
+#define mmCPU_PLL_DIV_SEL_1 0x4A2284
+
+#define mmCPU_PLL_DIV_SEL_2 0x4A2288
+
+#define mmCPU_PLL_DIV_SEL_3 0x4A228C
+
+#define mmCPU_PLL_DIV_EN_0 0x4A22A0
+
+#define mmCPU_PLL_DIV_EN_1 0x4A22A4
+
+#define mmCPU_PLL_DIV_EN_2 0x4A22A8
+
+#define mmCPU_PLL_DIV_EN_3 0x4A22AC
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_0 0x4A22C0
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_1 0x4A22C4
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_2 0x4A22C8
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_3 0x4A22CC
+
+#define mmCPU_PLL_CLK_GATER 0x4A2300
+
+#define mmCPU_PLL_CLK_RLX_0 0x4A2310
+
+#define mmCPU_PLL_CLK_RLX_1 0x4A2314
+
+#define mmCPU_PLL_CLK_RLX_2 0x4A2318
+
+#define mmCPU_PLL_CLK_RLX_3 0x4A231C
+
+#define mmCPU_PLL_REF_CNTR_PERIOD 0x4A2400
+
+#define mmCPU_PLL_REF_LOW_THRESHOLD 0x4A2410
+
+#define mmCPU_PLL_REF_HIGH_THRESHOLD 0x4A2420
+
+#define mmCPU_PLL_PLL_NOT_STABLE 0x4A2430
+
+#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440
+
+#endif /* ASIC_REG_CPU_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
new file mode 100644
index 000000000000..61c8cd9ce58b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_0_REGS_H_
+#define ASIC_REG_DMA_CH_0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_0 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_0_CFG0 0x401000
+
+#define mmDMA_CH_0_CFG1 0x401004
+
+#define mmDMA_CH_0_ERRMSG_ADDR_LO 0x401008
+
+#define mmDMA_CH_0_ERRMSG_ADDR_HI 0x40100C
+
+#define mmDMA_CH_0_ERRMSG_WDATA 0x401010
+
+#define mmDMA_CH_0_RD_COMP_ADDR_LO 0x401014
+
+#define mmDMA_CH_0_RD_COMP_ADDR_HI 0x401018
+
+#define mmDMA_CH_0_RD_COMP_WDATA 0x40101C
+
+#define mmDMA_CH_0_WR_COMP_ADDR_LO 0x401020
+
+#define mmDMA_CH_0_WR_COMP_ADDR_HI 0x401024
+
+#define mmDMA_CH_0_WR_COMP_WDATA 0x401028
+
+#define mmDMA_CH_0_LDMA_SRC_ADDR_LO 0x40102C
+
+#define mmDMA_CH_0_LDMA_SRC_ADDR_HI 0x401030
+
+#define mmDMA_CH_0_LDMA_DST_ADDR_LO 0x401034
+
+#define mmDMA_CH_0_LDMA_DST_ADDR_HI 0x401038
+
+#define mmDMA_CH_0_LDMA_TSIZE 0x40103C
+
+#define mmDMA_CH_0_COMIT_TRANSFER 0x401040
+
+#define mmDMA_CH_0_STS0 0x401044
+
+#define mmDMA_CH_0_STS1 0x401048
+
+#define mmDMA_CH_0_STS2 0x40104C
+
+#define mmDMA_CH_0_STS3 0x401050
+
+#define mmDMA_CH_0_STS4 0x401054
+
+#define mmDMA_CH_0_SRC_ADDR_LO_STS 0x401058
+
+#define mmDMA_CH_0_SRC_ADDR_HI_STS 0x40105C
+
+#define mmDMA_CH_0_SRC_TSIZE_STS 0x401060
+
+#define mmDMA_CH_0_DST_ADDR_LO_STS 0x401064
+
+#define mmDMA_CH_0_DST_ADDR_HI_STS 0x401068
+
+#define mmDMA_CH_0_DST_TSIZE_STS 0x40106C
+
+#define mmDMA_CH_0_RD_RATE_LIM_EN 0x401070
+
+#define mmDMA_CH_0_RD_RATE_LIM_RST_TOKEN 0x401074
+
+#define mmDMA_CH_0_RD_RATE_LIM_SAT 0x401078
+
+#define mmDMA_CH_0_RD_RATE_LIM_TOUT 0x40107C
+
+#define mmDMA_CH_0_WR_RATE_LIM_EN 0x401080
+
+#define mmDMA_CH_0_WR_RATE_LIM_RST_TOKEN 0x401084
+
+#define mmDMA_CH_0_WR_RATE_LIM_SAT 0x401088
+
+#define mmDMA_CH_0_WR_RATE_LIM_TOUT 0x40108C
+
+#define mmDMA_CH_0_CFG2 0x401090
+
+#define mmDMA_CH_0_TDMA_CTL 0x401100
+
+#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_LO 0x401104
+
+#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_HI 0x401108
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_0 0x40110C
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_0 0x401110
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0 0x401114
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_0 0x401118
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_0 0x40111C
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_1 0x401120
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_1 0x401124
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1 0x401128
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_1 0x40112C
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_1 0x401130
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_2 0x401134
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_2 0x401138
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2 0x40113C
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_2 0x401140
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_2 0x401144
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_3 0x401148
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_3 0x40114C
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3 0x401150
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_3 0x401154
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_3 0x401158
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_4 0x40115C
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_4 0x401160
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4 0x401164
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_4 0x401168
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_4 0x40116C
+
+#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_LO 0x401170
+
+#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_HI 0x401174
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_0 0x401178
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_0 0x40117C
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_0 0x401180
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_0 0x401184
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_0 0x401188
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_1 0x40118C
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_1 0x401190
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_1 0x401194
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_1 0x401198
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_1 0x40119C
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_2 0x4011A0
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_2 0x4011A4
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_2 0x4011A8
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_2 0x4011AC
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_2 0x4011B0
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_3 0x4011B4
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_3 0x4011B8
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_3 0x4011BC
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_3 0x4011C0
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_3 0x4011C4
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_4 0x4011C8
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_4 0x4011CC
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_4 0x4011D0
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_4 0x4011D4
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_4 0x4011D8
+
+#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC
+
+#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
new file mode 100644
index 000000000000..92960ef5e308
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_1_REGS_H_
+#define ASIC_REG_DMA_CH_1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_1 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_1_CFG0 0x409000
+
+#define mmDMA_CH_1_CFG1 0x409004
+
+#define mmDMA_CH_1_ERRMSG_ADDR_LO 0x409008
+
+#define mmDMA_CH_1_ERRMSG_ADDR_HI 0x40900C
+
+#define mmDMA_CH_1_ERRMSG_WDATA 0x409010
+
+#define mmDMA_CH_1_RD_COMP_ADDR_LO 0x409014
+
+#define mmDMA_CH_1_RD_COMP_ADDR_HI 0x409018
+
+#define mmDMA_CH_1_RD_COMP_WDATA 0x40901C
+
+#define mmDMA_CH_1_WR_COMP_ADDR_LO 0x409020
+
+#define mmDMA_CH_1_WR_COMP_ADDR_HI 0x409024
+
+#define mmDMA_CH_1_WR_COMP_WDATA 0x409028
+
+#define mmDMA_CH_1_LDMA_SRC_ADDR_LO 0x40902C
+
+#define mmDMA_CH_1_LDMA_SRC_ADDR_HI 0x409030
+
+#define mmDMA_CH_1_LDMA_DST_ADDR_LO 0x409034
+
+#define mmDMA_CH_1_LDMA_DST_ADDR_HI 0x409038
+
+#define mmDMA_CH_1_LDMA_TSIZE 0x40903C
+
+#define mmDMA_CH_1_COMIT_TRANSFER 0x409040
+
+#define mmDMA_CH_1_STS0 0x409044
+
+#define mmDMA_CH_1_STS1 0x409048
+
+#define mmDMA_CH_1_STS2 0x40904C
+
+#define mmDMA_CH_1_STS3 0x409050
+
+#define mmDMA_CH_1_STS4 0x409054
+
+#define mmDMA_CH_1_SRC_ADDR_LO_STS 0x409058
+
+#define mmDMA_CH_1_SRC_ADDR_HI_STS 0x40905C
+
+#define mmDMA_CH_1_SRC_TSIZE_STS 0x409060
+
+#define mmDMA_CH_1_DST_ADDR_LO_STS 0x409064
+
+#define mmDMA_CH_1_DST_ADDR_HI_STS 0x409068
+
+#define mmDMA_CH_1_DST_TSIZE_STS 0x40906C
+
+#define mmDMA_CH_1_RD_RATE_LIM_EN 0x409070
+
+#define mmDMA_CH_1_RD_RATE_LIM_RST_TOKEN 0x409074
+
+#define mmDMA_CH_1_RD_RATE_LIM_SAT 0x409078
+
+#define mmDMA_CH_1_RD_RATE_LIM_TOUT 0x40907C
+
+#define mmDMA_CH_1_WR_RATE_LIM_EN 0x409080
+
+#define mmDMA_CH_1_WR_RATE_LIM_RST_TOKEN 0x409084
+
+#define mmDMA_CH_1_WR_RATE_LIM_SAT 0x409088
+
+#define mmDMA_CH_1_WR_RATE_LIM_TOUT 0x40908C
+
+#define mmDMA_CH_1_CFG2 0x409090
+
+#define mmDMA_CH_1_TDMA_CTL 0x409100
+
+#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_LO 0x409104
+
+#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_HI 0x409108
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_0 0x40910C
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_0 0x409110
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_0 0x409114
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_0 0x409118
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_0 0x40911C
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_1 0x409120
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_1 0x409124
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_1 0x409128
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_1 0x40912C
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_1 0x409130
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_2 0x409134
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_2 0x409138
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_2 0x40913C
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_2 0x409140
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_2 0x409144
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_3 0x409148
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_3 0x40914C
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_3 0x409150
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_3 0x409154
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_3 0x409158
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_4 0x40915C
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_4 0x409160
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_4 0x409164
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_4 0x409168
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_4 0x40916C
+
+#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_LO 0x409170
+
+#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_HI 0x409174
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_0 0x409178
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_0 0x40917C
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_0 0x409180
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_0 0x409184
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_0 0x409188
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_1 0x40918C
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_1 0x409190
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_1 0x409194
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_1 0x409198
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_1 0x40919C
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_2 0x4091A0
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_2 0x4091A4
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_2 0x4091A8
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_2 0x4091AC
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_2 0x4091B0
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_3 0x4091B4
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_3 0x4091B8
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_3 0x4091BC
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_3 0x4091C0
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_3 0x4091C4
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_4 0x4091C8
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_4 0x4091CC
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_4 0x4091D0
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_4 0x4091D4
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_4 0x4091D8
+
+#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC
+
+#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
new file mode 100644
index 000000000000..4e37871a51bb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_2_REGS_H_
+#define ASIC_REG_DMA_CH_2_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_2 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_2_CFG0 0x411000
+
+#define mmDMA_CH_2_CFG1 0x411004
+
+#define mmDMA_CH_2_ERRMSG_ADDR_LO 0x411008
+
+#define mmDMA_CH_2_ERRMSG_ADDR_HI 0x41100C
+
+#define mmDMA_CH_2_ERRMSG_WDATA 0x411010
+
+#define mmDMA_CH_2_RD_COMP_ADDR_LO 0x411014
+
+#define mmDMA_CH_2_RD_COMP_ADDR_HI 0x411018
+
+#define mmDMA_CH_2_RD_COMP_WDATA 0x41101C
+
+#define mmDMA_CH_2_WR_COMP_ADDR_LO 0x411020
+
+#define mmDMA_CH_2_WR_COMP_ADDR_HI 0x411024
+
+#define mmDMA_CH_2_WR_COMP_WDATA 0x411028
+
+#define mmDMA_CH_2_LDMA_SRC_ADDR_LO 0x41102C
+
+#define mmDMA_CH_2_LDMA_SRC_ADDR_HI 0x411030
+
+#define mmDMA_CH_2_LDMA_DST_ADDR_LO 0x411034
+
+#define mmDMA_CH_2_LDMA_DST_ADDR_HI 0x411038
+
+#define mmDMA_CH_2_LDMA_TSIZE 0x41103C
+
+#define mmDMA_CH_2_COMIT_TRANSFER 0x411040
+
+#define mmDMA_CH_2_STS0 0x411044
+
+#define mmDMA_CH_2_STS1 0x411048
+
+#define mmDMA_CH_2_STS2 0x41104C
+
+#define mmDMA_CH_2_STS3 0x411050
+
+#define mmDMA_CH_2_STS4 0x411054
+
+#define mmDMA_CH_2_SRC_ADDR_LO_STS 0x411058
+
+#define mmDMA_CH_2_SRC_ADDR_HI_STS 0x41105C
+
+#define mmDMA_CH_2_SRC_TSIZE_STS 0x411060
+
+#define mmDMA_CH_2_DST_ADDR_LO_STS 0x411064
+
+#define mmDMA_CH_2_DST_ADDR_HI_STS 0x411068
+
+#define mmDMA_CH_2_DST_TSIZE_STS 0x41106C
+
+#define mmDMA_CH_2_RD_RATE_LIM_EN 0x411070
+
+#define mmDMA_CH_2_RD_RATE_LIM_RST_TOKEN 0x411074
+
+#define mmDMA_CH_2_RD_RATE_LIM_SAT 0x411078
+
+#define mmDMA_CH_2_RD_RATE_LIM_TOUT 0x41107C
+
+#define mmDMA_CH_2_WR_RATE_LIM_EN 0x411080
+
+#define mmDMA_CH_2_WR_RATE_LIM_RST_TOKEN 0x411084
+
+#define mmDMA_CH_2_WR_RATE_LIM_SAT 0x411088
+
+#define mmDMA_CH_2_WR_RATE_LIM_TOUT 0x41108C
+
+#define mmDMA_CH_2_CFG2 0x411090
+
+#define mmDMA_CH_2_TDMA_CTL 0x411100
+
+#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_LO 0x411104
+
+#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_HI 0x411108
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_0 0x41110C
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_0 0x411110
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_0 0x411114
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_0 0x411118
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_0 0x41111C
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_1 0x411120
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_1 0x411124
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_1 0x411128
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_1 0x41112C
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_1 0x411130
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_2 0x411134
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_2 0x411138
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_2 0x41113C
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_2 0x411140
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_2 0x411144
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_3 0x411148
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_3 0x41114C
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_3 0x411150
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_3 0x411154
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_3 0x411158
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_4 0x41115C
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_4 0x411160
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_4 0x411164
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_4 0x411168
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_4 0x41116C
+
+#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_LO 0x411170
+
+#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_HI 0x411174
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_0 0x411178
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_0 0x41117C
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_0 0x411180
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_0 0x411184
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_0 0x411188
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_1 0x41118C
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_1 0x411190
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_1 0x411194
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_1 0x411198
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_1 0x41119C
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_2 0x4111A0
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_2 0x4111A4
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_2 0x4111A8
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_2 0x4111AC
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_2 0x4111B0
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_3 0x4111B4
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_3 0x4111B8
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_3 0x4111BC
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_3 0x4111C0
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_3 0x4111C4
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_4 0x4111C8
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_4 0x4111CC
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_4 0x4111D0
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_4 0x4111D4
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_4 0x4111D8
+
+#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC
+
+#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
new file mode 100644
index 000000000000..a2d6aeb32a18
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_3_REGS_H_
+#define ASIC_REG_DMA_CH_3_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_3 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_3_CFG0 0x419000
+
+#define mmDMA_CH_3_CFG1 0x419004
+
+#define mmDMA_CH_3_ERRMSG_ADDR_LO 0x419008
+
+#define mmDMA_CH_3_ERRMSG_ADDR_HI 0x41900C
+
+#define mmDMA_CH_3_ERRMSG_WDATA 0x419010
+
+#define mmDMA_CH_3_RD_COMP_ADDR_LO 0x419014
+
+#define mmDMA_CH_3_RD_COMP_ADDR_HI 0x419018
+
+#define mmDMA_CH_3_RD_COMP_WDATA 0x41901C
+
+#define mmDMA_CH_3_WR_COMP_ADDR_LO 0x419020
+
+#define mmDMA_CH_3_WR_COMP_ADDR_HI 0x419024
+
+#define mmDMA_CH_3_WR_COMP_WDATA 0x419028
+
+#define mmDMA_CH_3_LDMA_SRC_ADDR_LO 0x41902C
+
+#define mmDMA_CH_3_LDMA_SRC_ADDR_HI 0x419030
+
+#define mmDMA_CH_3_LDMA_DST_ADDR_LO 0x419034
+
+#define mmDMA_CH_3_LDMA_DST_ADDR_HI 0x419038
+
+#define mmDMA_CH_3_LDMA_TSIZE 0x41903C
+
+#define mmDMA_CH_3_COMIT_TRANSFER 0x419040
+
+#define mmDMA_CH_3_STS0 0x419044
+
+#define mmDMA_CH_3_STS1 0x419048
+
+#define mmDMA_CH_3_STS2 0x41904C
+
+#define mmDMA_CH_3_STS3 0x419050
+
+#define mmDMA_CH_3_STS4 0x419054
+
+#define mmDMA_CH_3_SRC_ADDR_LO_STS 0x419058
+
+#define mmDMA_CH_3_SRC_ADDR_HI_STS 0x41905C
+
+#define mmDMA_CH_3_SRC_TSIZE_STS 0x419060
+
+#define mmDMA_CH_3_DST_ADDR_LO_STS 0x419064
+
+#define mmDMA_CH_3_DST_ADDR_HI_STS 0x419068
+
+#define mmDMA_CH_3_DST_TSIZE_STS 0x41906C
+
+#define mmDMA_CH_3_RD_RATE_LIM_EN 0x419070
+
+#define mmDMA_CH_3_RD_RATE_LIM_RST_TOKEN 0x419074
+
+#define mmDMA_CH_3_RD_RATE_LIM_SAT 0x419078
+
+#define mmDMA_CH_3_RD_RATE_LIM_TOUT 0x41907C
+
+#define mmDMA_CH_3_WR_RATE_LIM_EN 0x419080
+
+#define mmDMA_CH_3_WR_RATE_LIM_RST_TOKEN 0x419084
+
+#define mmDMA_CH_3_WR_RATE_LIM_SAT 0x419088
+
+#define mmDMA_CH_3_WR_RATE_LIM_TOUT 0x41908C
+
+#define mmDMA_CH_3_CFG2 0x419090
+
+#define mmDMA_CH_3_TDMA_CTL 0x419100
+
+#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_LO 0x419104
+
+#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_HI 0x419108
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_0 0x41910C
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_0 0x419110
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_0 0x419114
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_0 0x419118
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_0 0x41911C
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_1 0x419120
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_1 0x419124
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_1 0x419128
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_1 0x41912C
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_1 0x419130
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_2 0x419134
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_2 0x419138
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_2 0x41913C
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_2 0x419140
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_2 0x419144
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_3 0x419148
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_3 0x41914C
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_3 0x419150
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_3 0x419154
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_3 0x419158
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_4 0x41915C
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_4 0x419160
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_4 0x419164
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_4 0x419168
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_4 0x41916C
+
+#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_LO 0x419170
+
+#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_HI 0x419174
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_0 0x419178
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_0 0x41917C
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_0 0x419180
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_0 0x419184
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_0 0x419188
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_1 0x41918C
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_1 0x419190
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_1 0x419194
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_1 0x419198
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_1 0x41919C
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_2 0x4191A0
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_2 0x4191A4
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_2 0x4191A8
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_2 0x4191AC
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_2 0x4191B0
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_3 0x4191B4
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_3 0x4191B8
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_3 0x4191BC
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_3 0x4191C0
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_3 0x4191C4
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_4 0x4191C8
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_4 0x4191CC
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_4 0x4191D0
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_4 0x4191D4
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_4 0x4191D8
+
+#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC
+
+#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
new file mode 100644
index 000000000000..400d6fd3acf5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_4_REGS_H_
+#define ASIC_REG_DMA_CH_4_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_4 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_4_CFG0 0x421000
+
+#define mmDMA_CH_4_CFG1 0x421004
+
+#define mmDMA_CH_4_ERRMSG_ADDR_LO 0x421008
+
+#define mmDMA_CH_4_ERRMSG_ADDR_HI 0x42100C
+
+#define mmDMA_CH_4_ERRMSG_WDATA 0x421010
+
+#define mmDMA_CH_4_RD_COMP_ADDR_LO 0x421014
+
+#define mmDMA_CH_4_RD_COMP_ADDR_HI 0x421018
+
+#define mmDMA_CH_4_RD_COMP_WDATA 0x42101C
+
+#define mmDMA_CH_4_WR_COMP_ADDR_LO 0x421020
+
+#define mmDMA_CH_4_WR_COMP_ADDR_HI 0x421024
+
+#define mmDMA_CH_4_WR_COMP_WDATA 0x421028
+
+#define mmDMA_CH_4_LDMA_SRC_ADDR_LO 0x42102C
+
+#define mmDMA_CH_4_LDMA_SRC_ADDR_HI 0x421030
+
+#define mmDMA_CH_4_LDMA_DST_ADDR_LO 0x421034
+
+#define mmDMA_CH_4_LDMA_DST_ADDR_HI 0x421038
+
+#define mmDMA_CH_4_LDMA_TSIZE 0x42103C
+
+#define mmDMA_CH_4_COMIT_TRANSFER 0x421040
+
+#define mmDMA_CH_4_STS0 0x421044
+
+#define mmDMA_CH_4_STS1 0x421048
+
+#define mmDMA_CH_4_STS2 0x42104C
+
+#define mmDMA_CH_4_STS3 0x421050
+
+#define mmDMA_CH_4_STS4 0x421054
+
+#define mmDMA_CH_4_SRC_ADDR_LO_STS 0x421058
+
+#define mmDMA_CH_4_SRC_ADDR_HI_STS 0x42105C
+
+#define mmDMA_CH_4_SRC_TSIZE_STS 0x421060
+
+#define mmDMA_CH_4_DST_ADDR_LO_STS 0x421064
+
+#define mmDMA_CH_4_DST_ADDR_HI_STS 0x421068
+
+#define mmDMA_CH_4_DST_TSIZE_STS 0x42106C
+
+#define mmDMA_CH_4_RD_RATE_LIM_EN 0x421070
+
+#define mmDMA_CH_4_RD_RATE_LIM_RST_TOKEN 0x421074
+
+#define mmDMA_CH_4_RD_RATE_LIM_SAT 0x421078
+
+#define mmDMA_CH_4_RD_RATE_LIM_TOUT 0x42107C
+
+#define mmDMA_CH_4_WR_RATE_LIM_EN 0x421080
+
+#define mmDMA_CH_4_WR_RATE_LIM_RST_TOKEN 0x421084
+
+#define mmDMA_CH_4_WR_RATE_LIM_SAT 0x421088
+
+#define mmDMA_CH_4_WR_RATE_LIM_TOUT 0x42108C
+
+#define mmDMA_CH_4_CFG2 0x421090
+
+#define mmDMA_CH_4_TDMA_CTL 0x421100
+
+#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_LO 0x421104
+
+#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_HI 0x421108
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_0 0x42110C
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_0 0x421110
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_0 0x421114
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_0 0x421118
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_0 0x42111C
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_1 0x421120
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_1 0x421124
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_1 0x421128
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_1 0x42112C
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_1 0x421130
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_2 0x421134
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_2 0x421138
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_2 0x42113C
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_2 0x421140
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_2 0x421144
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_3 0x421148
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_3 0x42114C
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_3 0x421150
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_3 0x421154
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_3 0x421158
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_4 0x42115C
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_4 0x421160
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_4 0x421164
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_4 0x421168
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_4 0x42116C
+
+#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_LO 0x421170
+
+#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_HI 0x421174
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_0 0x421178
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_0 0x42117C
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_0 0x421180
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_0 0x421184
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_0 0x421188
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_1 0x42118C
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_1 0x421190
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_1 0x421194
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_1 0x421198
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_1 0x42119C
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_2 0x4211A0
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_2 0x4211A4
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_2 0x4211A8
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_2 0x4211AC
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_2 0x4211B0
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_3 0x4211B4
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_3 0x4211B8
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_3 0x4211BC
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_3 0x4211C0
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_3 0x4211C4
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_4 0x4211C8
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_4 0x4211CC
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_4 0x4211D0
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_4 0x4211D4
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_4 0x4211D8
+
+#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC
+
+#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */
+
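
The five DMA_CH_<n> blocks above are identical register sets at bases 0x401000, 0x409000, 0x411000, 0x419000 and 0x421000, i.e. a 0x8000 stride between channels. The standalone sketch below (not part of the patch) derives a per-channel register offset from the channel 0 definitions; the helper name and the stride constant are inferred from the listed bases, not taken from the driver.

/* dma_ch_stride_sketch.c - illustrative only */
#include <stdint.h>
#include <stdio.h>

/* Channel 0 definition mirrored from dma_ch_0_regs.h above */
#define mmDMA_CH_0_CFG0		0x401000

/* Inferred from the listed per-channel bases (0x401000, 0x409000, ...) */
#define DMA_CH_OFFSET		0x8000

/* Hypothetical helper: offset of a channel-0 register in channel 'ch' */
static uint32_t dma_ch_reg(uint32_t ch0_reg, unsigned int ch)
{
	return ch0_reg + ch * DMA_CH_OFFSET;
}

int main(void)
{
	/* Prints 0x421000, matching mmDMA_CH_4_CFG0 above */
	printf("DMA_CH_4 CFG0 = 0x%x\n", dma_ch_reg(mmDMA_CH_0_CFG0, 4));
	return 0;
}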
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
new file mode 100644
index 000000000000..8d965443c51e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_MACRO_MASKS_H_
+#define ASIC_REG_DMA_MACRO_MASKS_H_
+
+/*
+ *****************************************
+ * DMA_MACRO (Prototype: DMA_MACRO)
+ *****************************************
+ */
+
+/* DMA_MACRO_LBW_RANGE_HIT_BLOCK */
+#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_SHIFT 0
+#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_MASK 0xFFFF
+
+/* DMA_MACRO_LBW_RANGE_MASK */
+#define DMA_MACRO_LBW_RANGE_MASK_R_SHIFT 0
+#define DMA_MACRO_LBW_RANGE_MASK_R_MASK 0x3FFFFFF
+
+/* DMA_MACRO_LBW_RANGE_BASE */
+#define DMA_MACRO_LBW_RANGE_BASE_R_SHIFT 0
+#define DMA_MACRO_LBW_RANGE_BASE_R_MASK 0x3FFFFFF
+
+/* DMA_MACRO_HBW_RANGE_HIT_BLOCK */
+#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_MASK 0xFF
+
+/* DMA_MACRO_HBW_RANGE_MASK_49_32 */
+#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_MASK 0x3FFFF
+
+/* DMA_MACRO_HBW_RANGE_MASK_31_0 */
+#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_MASK 0xFFFFFFFF
+
+/* DMA_MACRO_HBW_RANGE_BASE_49_32 */
+#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_MASK 0x3FFFF
+
+/* DMA_MACRO_HBW_RANGE_BASE_31_0 */
+#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_MASK 0xFFFFFFFF
+
+/* DMA_MACRO_WRITE_EN */
+#define DMA_MACRO_WRITE_EN_R_SHIFT 0
+#define DMA_MACRO_WRITE_EN_R_MASK 0x1
+
+/* DMA_MACRO_WRITE_CREDIT */
+#define DMA_MACRO_WRITE_CREDIT_R_SHIFT 0
+#define DMA_MACRO_WRITE_CREDIT_R_MASK 0x3FF
+
+/* DMA_MACRO_READ_EN */
+#define DMA_MACRO_READ_EN_R_SHIFT 0
+#define DMA_MACRO_READ_EN_R_MASK 0x1
+
+/* DMA_MACRO_READ_CREDIT */
+#define DMA_MACRO_READ_CREDIT_R_SHIFT 0
+#define DMA_MACRO_READ_CREDIT_R_MASK 0x3FF
+
+/* DMA_MACRO_SRAM_BUSY */
+
+/* DMA_MACRO_RAZWI_LBW_WT_VLD */
+#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_LBW_WT_ID */
+#define DMA_MACRO_RAZWI_LBW_WT_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_WT_ID_R_MASK 0x7FFF
+
+/* DMA_MACRO_RAZWI_LBW_RD_VLD */
+#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_LBW_RD_ID */
+#define DMA_MACRO_RAZWI_LBW_RD_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_RD_ID_R_MASK 0x7FFF
+
+/* DMA_MACRO_RAZWI_HBW_WT_VLD */
+#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_HBW_WT_ID */
+#define DMA_MACRO_RAZWI_HBW_WT_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_WT_ID_R_MASK 0x1FFFFFFF
+
+/* DMA_MACRO_RAZWI_HBW_RD_VLD */
+#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_HBW_RD_ID */
+#define DMA_MACRO_RAZWI_HBW_RD_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF
+
+#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
new file mode 100644
index 000000000000..8bfcb001189d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_MACRO_REGS_H_
+#define ASIC_REG_DMA_MACRO_REGS_H_
+
+/*
+ *****************************************
+ * DMA_MACRO (Prototype: DMA_MACRO)
+ *****************************************
+ */
+
+#define mmDMA_MACRO_LBW_RANGE_HIT_BLOCK 0x4B0000
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_0 0x4B0004
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_1 0x4B0008
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_2 0x4B000C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_3 0x4B0010
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_4 0x4B0014
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_5 0x4B0018
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_6 0x4B001C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_7 0x4B0020
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_8 0x4B0024
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_9 0x4B0028
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_10 0x4B002C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_11 0x4B0030
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_12 0x4B0034
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_13 0x4B0038
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_14 0x4B003C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_15 0x4B0040
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_0 0x4B0044
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_1 0x4B0048
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_2 0x4B004C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_3 0x4B0050
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_4 0x4B0054
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_5 0x4B0058
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_6 0x4B005C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_7 0x4B0060
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_8 0x4B0064
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_9 0x4B0068
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_10 0x4B006C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_11 0x4B0070
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_12 0x4B0074
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_13 0x4B0078
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_14 0x4B007C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_15 0x4B0080
+
+#define mmDMA_MACRO_HBW_RANGE_HIT_BLOCK 0x4B0084
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_0 0x4B00A8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_1 0x4B00AC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_2 0x4B00B0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_3 0x4B00B4
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_4 0x4B00B8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_5 0x4B00BC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_6 0x4B00C0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_7 0x4B00C4
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_0 0x4B00C8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_1 0x4B00CC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_2 0x4B00D0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_3 0x4B00D4
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_4 0x4B00D8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_5 0x4B00DC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_6 0x4B00E0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_7 0x4B00E4
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_0 0x4B00E8
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_1 0x4B00EC
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_2 0x4B00F0
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_3 0x4B00F4
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_4 0x4B00F8
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_5 0x4B00FC
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_6 0x4B0100
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_7 0x4B0104
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_0 0x4B0108
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_1 0x4B010C
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_2 0x4B0110
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_3 0x4B0114
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_4 0x4B0118
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_5 0x4B011C
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_6 0x4B0120
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_7 0x4B0124
+
+#define mmDMA_MACRO_WRITE_EN 0x4B0128
+
+#define mmDMA_MACRO_WRITE_CREDIT 0x4B012C
+
+#define mmDMA_MACRO_READ_EN 0x4B0130
+
+#define mmDMA_MACRO_READ_CREDIT 0x4B0134
+
+#define mmDMA_MACRO_SRAM_BUSY 0x4B0138
+
+#define mmDMA_MACRO_RAZWI_LBW_WT_VLD 0x4B013C
+
+#define mmDMA_MACRO_RAZWI_LBW_WT_ID 0x4B0140
+
+#define mmDMA_MACRO_RAZWI_LBW_RD_VLD 0x4B0144
+
+#define mmDMA_MACRO_RAZWI_LBW_RD_ID 0x4B0148
+
+#define mmDMA_MACRO_RAZWI_HBW_WT_VLD 0x4B014C
+
+#define mmDMA_MACRO_RAZWI_HBW_WT_ID 0x4B0150
+
+#define mmDMA_MACRO_RAZWI_HBW_RD_VLD 0x4B0154
+
+#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158
+
+#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
new file mode 100644
index 000000000000..9f33f351a3c1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_NRTR_MASKS_H_
+#define ASIC_REG_DMA_NRTR_MASKS_H_
+
+/*
+ *****************************************
+ * DMA_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+/* DMA_NRTR_HBW_MAX_CRED */
+#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
+#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define DMA_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
+#define DMA_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
+#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define DMA_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
+#define DMA_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* DMA_NRTR_LBW_MAX_CRED */
+#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
+#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define DMA_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
+#define DMA_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
+#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define DMA_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
+#define DMA_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* DMA_NRTR_DBG_E_ARB */
+#define DMA_NRTR_DBG_E_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_E_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_E_ARB_S_SHIFT 8
+#define DMA_NRTR_DBG_E_ARB_S_MASK 0x700
+#define DMA_NRTR_DBG_E_ARB_N_SHIFT 16
+#define DMA_NRTR_DBG_E_ARB_N_MASK 0x70000
+#define DMA_NRTR_DBG_E_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_W_ARB */
+#define DMA_NRTR_DBG_W_ARB_E_SHIFT 0
+#define DMA_NRTR_DBG_W_ARB_E_MASK 0x7
+#define DMA_NRTR_DBG_W_ARB_S_SHIFT 8
+#define DMA_NRTR_DBG_W_ARB_S_MASK 0x700
+#define DMA_NRTR_DBG_W_ARB_N_SHIFT 16
+#define DMA_NRTR_DBG_W_ARB_N_MASK 0x70000
+#define DMA_NRTR_DBG_W_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_N_ARB */
+#define DMA_NRTR_DBG_N_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_N_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_N_ARB_E_SHIFT 8
+#define DMA_NRTR_DBG_N_ARB_E_MASK 0x700
+#define DMA_NRTR_DBG_N_ARB_S_SHIFT 16
+#define DMA_NRTR_DBG_N_ARB_S_MASK 0x70000
+#define DMA_NRTR_DBG_N_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_S_ARB */
+#define DMA_NRTR_DBG_S_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_S_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_S_ARB_E_SHIFT 8
+#define DMA_NRTR_DBG_S_ARB_E_MASK 0x700
+#define DMA_NRTR_DBG_S_ARB_N_SHIFT 16
+#define DMA_NRTR_DBG_S_ARB_N_MASK 0x70000
+#define DMA_NRTR_DBG_S_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_L_ARB */
+#define DMA_NRTR_DBG_L_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_L_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_L_ARB_E_SHIFT 8
+#define DMA_NRTR_DBG_L_ARB_E_MASK 0x700
+#define DMA_NRTR_DBG_L_ARB_S_SHIFT 16
+#define DMA_NRTR_DBG_L_ARB_S_MASK 0x70000
+#define DMA_NRTR_DBG_L_ARB_N_SHIFT 24
+#define DMA_NRTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* DMA_NRTR_DBG_E_ARB_MAX */
+#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_W_ARB_MAX */
+#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_N_ARB_MAX */
+#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_S_ARB_MAX */
+#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_L_ARB_MAX */
+#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_SPLIT_COEF */
+#define DMA_NRTR_SPLIT_COEF_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_CFG */
+#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
+#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
+#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
+#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
+#define DMA_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define DMA_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* DMA_NRTR_SPLIT_RD_SAT */
+#define DMA_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_RD_RST_TOKEN */
+#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_RD_TIMEOUT */
+#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_SPLIT_WR_SAT */
+#define DMA_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_WPLIT_WR_TST_TOLEN */
+#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_WR_TIMEOUT */
+#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_HBW_RANGE_HIT */
+#define DMA_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* DMA_NRTR_HBW_RANGE_MASK_L */
+#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_HBW_RANGE_MASK_H */
+#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* DMA_NRTR_HBW_RANGE_BASE_L */
+#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_HBW_RANGE_BASE_H */
+#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* DMA_NRTR_LBW_RANGE_HIT */
+#define DMA_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define DMA_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* DMA_NRTR_LBW_RANGE_MASK */
+#define DMA_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define DMA_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* DMA_NRTR_LBW_RANGE_BASE */
+#define DMA_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define DMA_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* DMA_NRTR_RGLTR */
+#define DMA_NRTR_RGLTR_WR_EN_SHIFT 0
+#define DMA_NRTR_RGLTR_WR_EN_MASK 0x1
+#define DMA_NRTR_RGLTR_RD_EN_SHIFT 4
+#define DMA_NRTR_RGLTR_RD_EN_MASK 0x10
+
+/* DMA_NRTR_RGLTR_WR_RESULT */
+#define DMA_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define DMA_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* DMA_NRTR_RGLTR_RD_RESULT */
+#define DMA_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define DMA_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* DMA_NRTR_SCRAMB_EN */
+#define DMA_NRTR_SCRAMB_EN_VAL_SHIFT 0
+#define DMA_NRTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* DMA_NRTR_NON_LIN_SCRAMB */
+#define DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
new file mode 100644
index 000000000000..d8293745a02b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_NRTR_REGS_H_
+#define ASIC_REG_DMA_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * DMA_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmDMA_NRTR_HBW_MAX_CRED 0x1C0100
+
+#define mmDMA_NRTR_LBW_MAX_CRED 0x1C0120
+
+#define mmDMA_NRTR_DBG_E_ARB 0x1C0300
+
+#define mmDMA_NRTR_DBG_W_ARB 0x1C0304
+
+#define mmDMA_NRTR_DBG_N_ARB 0x1C0308
+
+#define mmDMA_NRTR_DBG_S_ARB 0x1C030C
+
+#define mmDMA_NRTR_DBG_L_ARB 0x1C0310
+
+#define mmDMA_NRTR_DBG_E_ARB_MAX 0x1C0320
+
+#define mmDMA_NRTR_DBG_W_ARB_MAX 0x1C0324
+
+#define mmDMA_NRTR_DBG_N_ARB_MAX 0x1C0328
+
+#define mmDMA_NRTR_DBG_S_ARB_MAX 0x1C032C
+
+#define mmDMA_NRTR_DBG_L_ARB_MAX 0x1C0330
+
+#define mmDMA_NRTR_SPLIT_COEF_0 0x1C0400
+
+#define mmDMA_NRTR_SPLIT_COEF_1 0x1C0404
+
+#define mmDMA_NRTR_SPLIT_COEF_2 0x1C0408
+
+#define mmDMA_NRTR_SPLIT_COEF_3 0x1C040C
+
+#define mmDMA_NRTR_SPLIT_COEF_4 0x1C0410
+
+#define mmDMA_NRTR_SPLIT_COEF_5 0x1C0414
+
+#define mmDMA_NRTR_SPLIT_COEF_6 0x1C0418
+
+#define mmDMA_NRTR_SPLIT_COEF_7 0x1C041C
+
+#define mmDMA_NRTR_SPLIT_COEF_8 0x1C0420
+
+#define mmDMA_NRTR_SPLIT_COEF_9 0x1C0424
+
+#define mmDMA_NRTR_SPLIT_CFG 0x1C0440
+
+#define mmDMA_NRTR_SPLIT_RD_SAT 0x1C0444
+
+#define mmDMA_NRTR_SPLIT_RD_RST_TOKEN 0x1C0448
+
+#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_0 0x1C044C
+
+#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_1 0x1C0450
+
+#define mmDMA_NRTR_SPLIT_WR_SAT 0x1C0454
+
+#define mmDMA_NRTR_WPLIT_WR_TST_TOLEN 0x1C0458
+
+#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_0 0x1C045C
+
+#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_1 0x1C0460
+
+#define mmDMA_NRTR_HBW_RANGE_HIT 0x1C0470
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_0 0x1C0480
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_1 0x1C0484
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_2 0x1C0488
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_3 0x1C048C
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_4 0x1C0490
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_5 0x1C0494
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_6 0x1C0498
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_7 0x1C049C
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_0 0x1C04A0
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_1 0x1C04A4
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_2 0x1C04A8
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_3 0x1C04AC
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_4 0x1C04B0
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_5 0x1C04B4
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_6 0x1C04B8
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_7 0x1C04BC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_0 0x1C04C0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_1 0x1C04C4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_2 0x1C04C8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_3 0x1C04CC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_4 0x1C04D0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_5 0x1C04D4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_6 0x1C04D8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_7 0x1C04DC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_0 0x1C04E0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_1 0x1C04E4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_2 0x1C04E8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_3 0x1C04EC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_4 0x1C04F0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_5 0x1C04F4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_6 0x1C04F8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_7 0x1C04FC
+
+#define mmDMA_NRTR_LBW_RANGE_HIT 0x1C0500
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_0 0x1C0510
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_1 0x1C0514
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_2 0x1C0518
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_3 0x1C051C
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_4 0x1C0520
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_5 0x1C0524
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_6 0x1C0528
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_7 0x1C052C
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_8 0x1C0530
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_9 0x1C0534
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_10 0x1C0538
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_11 0x1C053C
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_12 0x1C0540
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_13 0x1C0544
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_14 0x1C0548
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_15 0x1C054C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_0 0x1C0550
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_1 0x1C0554
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_2 0x1C0558
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_3 0x1C055C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_4 0x1C0560
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_5 0x1C0564
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_6 0x1C0568
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_7 0x1C056C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_8 0x1C0570
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_9 0x1C0574
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_10 0x1C0578
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_11 0x1C057C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_12 0x1C0580
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_13 0x1C0584
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_14 0x1C0588
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_15 0x1C058C
+
+#define mmDMA_NRTR_RGLTR 0x1C0590
+
+#define mmDMA_NRTR_RGLTR_WR_RESULT 0x1C0594
+
+#define mmDMA_NRTR_RGLTR_RD_RESULT 0x1C0598
+
+#define mmDMA_NRTR_SCRAMB_EN 0x1C0600
+
+#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604
+
+#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
new file mode 100644
index 000000000000..10619dbb9b17
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_0_MASKS_H_
+#define ASIC_REG_DMA_QM_0_MASKS_H_
+
+/*
+ *****************************************
+ * DMA_QM_0 (Prototype: QMAN)
+ *****************************************
+ */
+
+/* DMA_QM_0_GLBL_CFG0 */
+#define DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT 0
+#define DMA_QM_0_GLBL_CFG0_PQF_EN_MASK 0x1
+#define DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT 1
+#define DMA_QM_0_GLBL_CFG0_CQF_EN_MASK 0x2
+#define DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT 2
+#define DMA_QM_0_GLBL_CFG0_CP_EN_MASK 0x4
+#define DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT 3
+#define DMA_QM_0_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* DMA_QM_0_GLBL_CFG1 */
+#define DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define DMA_QM_0_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define DMA_QM_0_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT 2
+#define DMA_QM_0_GLBL_CFG1_CP_STOP_MASK 0x4
+#define DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define DMA_QM_0_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* DMA_QM_0_GLBL_PROT */
+#define DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT 0
+#define DMA_QM_0_GLBL_PROT_PQF_PROT_MASK 0x1
+#define DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT 1
+#define DMA_QM_0_GLBL_PROT_CQF_PROT_MASK 0x2
+#define DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT 2
+#define DMA_QM_0_GLBL_PROT_CP_PROT_MASK 0x4
+#define DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT 3
+#define DMA_QM_0_GLBL_PROT_DMA_PROT_MASK 0x8
+#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* DMA_QM_0_GLBL_ERR_CFG */
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* DMA_QM_0_GLBL_ERR_ADDR_LO */
+#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_GLBL_ERR_ADDR_HI */
+#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_GLBL_ERR_WDATA */
+#define DMA_QM_0_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_GLBL_SECURE_PROPS */
+#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA_QM_0_GLBL_NON_SECURE_PROPS */
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA_QM_0_GLBL_STS0 */
+#define DMA_QM_0_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define DMA_QM_0_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define DMA_QM_0_GLBL_STS0_CP_IDLE_SHIFT 2
+#define DMA_QM_0_GLBL_STS0_CP_IDLE_MASK 0x4
+#define DMA_QM_0_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* DMA_QM_0_GLBL_STS1 */
+#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* DMA_QM_0_PQ_BASE_LO */
+#define DMA_QM_0_PQ_BASE_LO_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_BASE_HI */
+#define DMA_QM_0_PQ_BASE_HI_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_SIZE */
+#define DMA_QM_0_PQ_SIZE_VAL_SHIFT 0
+#define DMA_QM_0_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PI */
+#define DMA_QM_0_PQ_PI_VAL_SHIFT 0
+#define DMA_QM_0_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_CI */
+#define DMA_QM_0_PQ_CI_VAL_SHIFT 0
+#define DMA_QM_0_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_CFG0 */
+#define DMA_QM_0_PQ_CFG0_RESERVED_SHIFT 0
+#define DMA_QM_0_PQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA_QM_0_PQ_CFG1 */
+#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA_QM_0_PQ_ARUSER */
+#define DMA_QM_0_PQ_ARUSER_NOSNOOP_SHIFT 0
+#define DMA_QM_0_PQ_ARUSER_NOSNOOP_MASK 0x1
+#define DMA_QM_0_PQ_ARUSER_WORD_SHIFT 1
+#define DMA_QM_0_PQ_ARUSER_WORD_MASK 0x2
+
+/* DMA_QM_0_PQ_PUSH0 */
+#define DMA_QM_0_PQ_PUSH0_PTR_LO_SHIFT 0
+#define DMA_QM_0_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PUSH1 */
+#define DMA_QM_0_PQ_PUSH1_PTR_HI_SHIFT 0
+#define DMA_QM_0_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PUSH2 */
+#define DMA_QM_0_PQ_PUSH2_TSIZE_SHIFT 0
+#define DMA_QM_0_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PUSH3 */
+#define DMA_QM_0_PQ_PUSH3_RPT_SHIFT 0
+#define DMA_QM_0_PQ_PUSH3_RPT_MASK 0xFFFF
+#define DMA_QM_0_PQ_PUSH3_CTL_SHIFT 16
+#define DMA_QM_0_PQ_PUSH3_CTL_MASK 0xFFFF0000
+
+/* DMA_QM_0_PQ_STS0 */
+#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA_QM_0_PQ_STS1 */
+#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define DMA_QM_0_PQ_STS1_PQ_BUSY_SHIFT 31
+#define DMA_QM_0_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_EN */
+#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN */
+#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_SAT */
+#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_TOUT */
+#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* DMA_QM_0_CQ_CFG0 */
+#define DMA_QM_0_CQ_CFG0_RESERVED_SHIFT 0
+#define DMA_QM_0_CQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA_QM_0_CQ_CFG1 */
+#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_ARUSER */
+#define DMA_QM_0_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define DMA_QM_0_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define DMA_QM_0_CQ_ARUSER_WORD_SHIFT 1
+#define DMA_QM_0_CQ_ARUSER_WORD_MASK 0x2
+
+/* DMA_QM_0_CQ_PTR_LO */
+#define DMA_QM_0_CQ_PTR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_PTR_HI */
+#define DMA_QM_0_CQ_PTR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_TSIZE */
+#define DMA_QM_0_CQ_TSIZE_VAL_SHIFT 0
+#define DMA_QM_0_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_CTL */
+#define DMA_QM_0_CQ_CTL_RPT_SHIFT 0
+#define DMA_QM_0_CQ_CTL_RPT_MASK 0xFFFF
+#define DMA_QM_0_CQ_CTL_CTL_SHIFT 16
+#define DMA_QM_0_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_PTR_LO_STS */
+#define DMA_QM_0_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_PTR_HI_STS */
+#define DMA_QM_0_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_TSIZE_STS */
+#define DMA_QM_0_CQ_TSIZE_STS_VAL_SHIFT 0
+#define DMA_QM_0_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_CTL_STS */
+#define DMA_QM_0_CQ_CTL_STS_RPT_SHIFT 0
+#define DMA_QM_0_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define DMA_QM_0_CQ_CTL_STS_CTL_SHIFT 16
+#define DMA_QM_0_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_STS0 */
+#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_STS1 */
+#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define DMA_QM_0_CQ_STS1_CQ_BUSY_SHIFT 31
+#define DMA_QM_0_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_EN */
+#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN */
+#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_SAT */
+#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_TOUT */
+#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* DMA_QM_0_CQ_IFIFO_CNT */
+#define DMA_QM_0_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* DMA_QM_0_CP_MSG_BASE0_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE0_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE1_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE1_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE2_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE2_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE3_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE3_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_TSIZE_OFFSET */
+#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET */
+#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET */
+#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_COMMIT_OFFSET */
+#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_FENCE0_RDATA */
+#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE1_RDATA */
+#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE2_RDATA */
+#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE3_RDATA */
+#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE0_CNT */
+#define DMA_QM_0_CP_FENCE0_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_FENCE1_CNT */
+#define DMA_QM_0_CP_FENCE1_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_FENCE2_CNT */
+#define DMA_QM_0_CP_FENCE2_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_FENCE3_CNT */
+#define DMA_QM_0_CP_FENCE3_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_STS */
+#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA_QM_0_CP_STS_ERDY_SHIFT 16
+#define DMA_QM_0_CP_STS_ERDY_MASK 0x10000
+#define DMA_QM_0_CP_STS_RRDY_SHIFT 17
+#define DMA_QM_0_CP_STS_RRDY_MASK 0x20000
+#define DMA_QM_0_CP_STS_MRDY_SHIFT 18
+#define DMA_QM_0_CP_STS_MRDY_MASK 0x40000
+#define DMA_QM_0_CP_STS_SW_STOP_SHIFT 19
+#define DMA_QM_0_CP_STS_SW_STOP_MASK 0x80000
+#define DMA_QM_0_CP_STS_FENCE_ID_SHIFT 20
+#define DMA_QM_0_CP_STS_FENCE_ID_MASK 0x300000
+#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* DMA_QM_0_CP_CURRENT_INST_LO */
+#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_CURRENT_INST_HI */
+#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_BARRIER_CFG */
+#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* DMA_QM_0_CP_DBG_0 */
+#define DMA_QM_0_CP_DBG_0_VAL_SHIFT 0
+#define DMA_QM_0_CP_DBG_0_VAL_MASK 0xFF
+
+/* DMA_QM_0_PQ_BUF_ADDR */
+#define DMA_QM_0_PQ_BUF_ADDR_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_BUF_RDATA */
+#define DMA_QM_0_PQ_BUF_RDATA_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_BUF_ADDR */
+#define DMA_QM_0_CQ_BUF_ADDR_VAL_SHIFT 0
+#define DMA_QM_0_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_BUF_RDATA */
+#define DMA_QM_0_CQ_BUF_RDATA_VAL_SHIFT 0
+#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
new file mode 100644
index 000000000000..c693bc5dcb22
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_0_REGS_H_
+#define ASIC_REG_DMA_QM_0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_0 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_0_GLBL_CFG0 0x400000
+
+#define mmDMA_QM_0_GLBL_CFG1 0x400004
+
+#define mmDMA_QM_0_GLBL_PROT 0x400008
+
+#define mmDMA_QM_0_GLBL_ERR_CFG 0x40000C
+
+#define mmDMA_QM_0_GLBL_ERR_ADDR_LO 0x400010
+
+#define mmDMA_QM_0_GLBL_ERR_ADDR_HI 0x400014
+
+#define mmDMA_QM_0_GLBL_ERR_WDATA 0x400018
+
+#define mmDMA_QM_0_GLBL_SECURE_PROPS 0x40001C
+
+#define mmDMA_QM_0_GLBL_NON_SECURE_PROPS 0x400020
+
+#define mmDMA_QM_0_GLBL_STS0 0x400024
+
+#define mmDMA_QM_0_GLBL_STS1 0x400028
+
+#define mmDMA_QM_0_PQ_BASE_LO 0x400060
+
+#define mmDMA_QM_0_PQ_BASE_HI 0x400064
+
+#define mmDMA_QM_0_PQ_SIZE 0x400068
+
+#define mmDMA_QM_0_PQ_PI 0x40006C
+
+#define mmDMA_QM_0_PQ_CI 0x400070
+
+#define mmDMA_QM_0_PQ_CFG0 0x400074
+
+#define mmDMA_QM_0_PQ_CFG1 0x400078
+
+#define mmDMA_QM_0_PQ_ARUSER 0x40007C
+
+#define mmDMA_QM_0_PQ_PUSH0 0x400080
+
+#define mmDMA_QM_0_PQ_PUSH1 0x400084
+
+#define mmDMA_QM_0_PQ_PUSH2 0x400088
+
+#define mmDMA_QM_0_PQ_PUSH3 0x40008C
+
+#define mmDMA_QM_0_PQ_STS0 0x400090
+
+#define mmDMA_QM_0_PQ_STS1 0x400094
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_EN 0x4000A0
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN 0x4000A4
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_SAT 0x4000A8
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT 0x4000AC
+
+#define mmDMA_QM_0_CQ_CFG0 0x4000B0
+
+#define mmDMA_QM_0_CQ_CFG1 0x4000B4
+
+#define mmDMA_QM_0_CQ_ARUSER 0x4000B8
+
+#define mmDMA_QM_0_CQ_PTR_LO 0x4000C0
+
+#define mmDMA_QM_0_CQ_PTR_HI 0x4000C4
+
+#define mmDMA_QM_0_CQ_TSIZE 0x4000C8
+
+#define mmDMA_QM_0_CQ_CTL 0x4000CC
+
+#define mmDMA_QM_0_CQ_PTR_LO_STS 0x4000D4
+
+#define mmDMA_QM_0_CQ_PTR_HI_STS 0x4000D8
+
+#define mmDMA_QM_0_CQ_TSIZE_STS 0x4000DC
+
+#define mmDMA_QM_0_CQ_CTL_STS 0x4000E0
+
+#define mmDMA_QM_0_CQ_STS0 0x4000E4
+
+#define mmDMA_QM_0_CQ_STS1 0x4000E8
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_EN 0x4000F0
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN 0x4000F4
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_SAT 0x4000F8
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT 0x4000FC
+
+#define mmDMA_QM_0_CQ_IFIFO_CNT 0x400108
+
+#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO 0x400120
+
+#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI 0x400124
+
+#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO 0x400128
+
+#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI 0x40012C
+
+#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO 0x400130
+
+#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI 0x400134
+
+#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO 0x400138
+
+#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI 0x40013C
+
+#define mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET 0x400140
+
+#define mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET 0x400144
+
+#define mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET 0x400148
+
+#define mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET 0x40014C
+
+#define mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET 0x400150
+
+#define mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET 0x400154
+
+#define mmDMA_QM_0_CP_FENCE0_RDATA 0x400158
+
+#define mmDMA_QM_0_CP_FENCE1_RDATA 0x40015C
+
+#define mmDMA_QM_0_CP_FENCE2_RDATA 0x400160
+
+#define mmDMA_QM_0_CP_FENCE3_RDATA 0x400164
+
+#define mmDMA_QM_0_CP_FENCE0_CNT 0x400168
+
+#define mmDMA_QM_0_CP_FENCE1_CNT 0x40016C
+
+#define mmDMA_QM_0_CP_FENCE2_CNT 0x400170
+
+#define mmDMA_QM_0_CP_FENCE3_CNT 0x400174
+
+#define mmDMA_QM_0_CP_STS 0x400178
+
+#define mmDMA_QM_0_CP_CURRENT_INST_LO 0x40017C
+
+#define mmDMA_QM_0_CP_CURRENT_INST_HI 0x400180
+
+#define mmDMA_QM_0_CP_BARRIER_CFG 0x400184
+
+#define mmDMA_QM_0_CP_DBG_0 0x400188
+
+#define mmDMA_QM_0_PQ_BUF_ADDR 0x400300
+
+#define mmDMA_QM_0_PQ_BUF_RDATA 0x400304
+
+#define mmDMA_QM_0_CQ_BUF_ADDR 0x400308
+
+#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C
+
+#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
new file mode 100644
index 000000000000..da928390f89c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_1_REGS_H_
+#define ASIC_REG_DMA_QM_1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_1 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_1_GLBL_CFG0 0x408000
+
+#define mmDMA_QM_1_GLBL_CFG1 0x408004
+
+#define mmDMA_QM_1_GLBL_PROT 0x408008
+
+#define mmDMA_QM_1_GLBL_ERR_CFG 0x40800C
+
+#define mmDMA_QM_1_GLBL_ERR_ADDR_LO 0x408010
+
+#define mmDMA_QM_1_GLBL_ERR_ADDR_HI 0x408014
+
+#define mmDMA_QM_1_GLBL_ERR_WDATA 0x408018
+
+#define mmDMA_QM_1_GLBL_SECURE_PROPS 0x40801C
+
+#define mmDMA_QM_1_GLBL_NON_SECURE_PROPS 0x408020
+
+#define mmDMA_QM_1_GLBL_STS0 0x408024
+
+#define mmDMA_QM_1_GLBL_STS1 0x408028
+
+#define mmDMA_QM_1_PQ_BASE_LO 0x408060
+
+#define mmDMA_QM_1_PQ_BASE_HI 0x408064
+
+#define mmDMA_QM_1_PQ_SIZE 0x408068
+
+#define mmDMA_QM_1_PQ_PI 0x40806C
+
+#define mmDMA_QM_1_PQ_CI 0x408070
+
+#define mmDMA_QM_1_PQ_CFG0 0x408074
+
+#define mmDMA_QM_1_PQ_CFG1 0x408078
+
+#define mmDMA_QM_1_PQ_ARUSER 0x40807C
+
+#define mmDMA_QM_1_PQ_PUSH0 0x408080
+
+#define mmDMA_QM_1_PQ_PUSH1 0x408084
+
+#define mmDMA_QM_1_PQ_PUSH2 0x408088
+
+#define mmDMA_QM_1_PQ_PUSH3 0x40808C
+
+#define mmDMA_QM_1_PQ_STS0 0x408090
+
+#define mmDMA_QM_1_PQ_STS1 0x408094
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_EN 0x4080A0
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN 0x4080A4
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_SAT 0x4080A8
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT 0x4080AC
+
+#define mmDMA_QM_1_CQ_CFG0 0x4080B0
+
+#define mmDMA_QM_1_CQ_CFG1 0x4080B4
+
+#define mmDMA_QM_1_CQ_ARUSER 0x4080B8
+
+#define mmDMA_QM_1_CQ_PTR_LO 0x4080C0
+
+#define mmDMA_QM_1_CQ_PTR_HI 0x4080C4
+
+#define mmDMA_QM_1_CQ_TSIZE 0x4080C8
+
+#define mmDMA_QM_1_CQ_CTL 0x4080CC
+
+#define mmDMA_QM_1_CQ_PTR_LO_STS 0x4080D4
+
+#define mmDMA_QM_1_CQ_PTR_HI_STS 0x4080D8
+
+#define mmDMA_QM_1_CQ_TSIZE_STS 0x4080DC
+
+#define mmDMA_QM_1_CQ_CTL_STS 0x4080E0
+
+#define mmDMA_QM_1_CQ_STS0 0x4080E4
+
+#define mmDMA_QM_1_CQ_STS1 0x4080E8
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_EN 0x4080F0
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN 0x4080F4
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_SAT 0x4080F8
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT 0x4080FC
+
+#define mmDMA_QM_1_CQ_IFIFO_CNT 0x408108
+
+#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO 0x408120
+
+#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI 0x408124
+
+#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO 0x408128
+
+#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI 0x40812C
+
+#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO 0x408130
+
+#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI 0x408134
+
+#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO 0x408138
+
+#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI 0x40813C
+
+#define mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET 0x408140
+
+#define mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET 0x408144
+
+#define mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET 0x408148
+
+#define mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET 0x40814C
+
+#define mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET 0x408150
+
+#define mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET 0x408154
+
+#define mmDMA_QM_1_CP_FENCE0_RDATA 0x408158
+
+#define mmDMA_QM_1_CP_FENCE1_RDATA 0x40815C
+
+#define mmDMA_QM_1_CP_FENCE2_RDATA 0x408160
+
+#define mmDMA_QM_1_CP_FENCE3_RDATA 0x408164
+
+#define mmDMA_QM_1_CP_FENCE0_CNT 0x408168
+
+#define mmDMA_QM_1_CP_FENCE1_CNT 0x40816C
+
+#define mmDMA_QM_1_CP_FENCE2_CNT 0x408170
+
+#define mmDMA_QM_1_CP_FENCE3_CNT 0x408174
+
+#define mmDMA_QM_1_CP_STS 0x408178
+
+#define mmDMA_QM_1_CP_CURRENT_INST_LO 0x40817C
+
+#define mmDMA_QM_1_CP_CURRENT_INST_HI 0x408180
+
+#define mmDMA_QM_1_CP_BARRIER_CFG 0x408184
+
+#define mmDMA_QM_1_CP_DBG_0 0x408188
+
+#define mmDMA_QM_1_PQ_BUF_ADDR 0x408300
+
+#define mmDMA_QM_1_PQ_BUF_RDATA 0x408304
+
+#define mmDMA_QM_1_CQ_BUF_ADDR 0x408308
+
+#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C
+
+#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
new file mode 100644
index 000000000000..b4f06e9b71d6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_2_REGS_H_
+#define ASIC_REG_DMA_QM_2_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_2 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_2_GLBL_CFG0 0x410000
+
+#define mmDMA_QM_2_GLBL_CFG1 0x410004
+
+#define mmDMA_QM_2_GLBL_PROT 0x410008
+
+#define mmDMA_QM_2_GLBL_ERR_CFG 0x41000C
+
+#define mmDMA_QM_2_GLBL_ERR_ADDR_LO 0x410010
+
+#define mmDMA_QM_2_GLBL_ERR_ADDR_HI 0x410014
+
+#define mmDMA_QM_2_GLBL_ERR_WDATA 0x410018
+
+#define mmDMA_QM_2_GLBL_SECURE_PROPS 0x41001C
+
+#define mmDMA_QM_2_GLBL_NON_SECURE_PROPS 0x410020
+
+#define mmDMA_QM_2_GLBL_STS0 0x410024
+
+#define mmDMA_QM_2_GLBL_STS1 0x410028
+
+#define mmDMA_QM_2_PQ_BASE_LO 0x410060
+
+#define mmDMA_QM_2_PQ_BASE_HI 0x410064
+
+#define mmDMA_QM_2_PQ_SIZE 0x410068
+
+#define mmDMA_QM_2_PQ_PI 0x41006C
+
+#define mmDMA_QM_2_PQ_CI 0x410070
+
+#define mmDMA_QM_2_PQ_CFG0 0x410074
+
+#define mmDMA_QM_2_PQ_CFG1 0x410078
+
+#define mmDMA_QM_2_PQ_ARUSER 0x41007C
+
+#define mmDMA_QM_2_PQ_PUSH0 0x410080
+
+#define mmDMA_QM_2_PQ_PUSH1 0x410084
+
+#define mmDMA_QM_2_PQ_PUSH2 0x410088
+
+#define mmDMA_QM_2_PQ_PUSH3 0x41008C
+
+#define mmDMA_QM_2_PQ_STS0 0x410090
+
+#define mmDMA_QM_2_PQ_STS1 0x410094
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_EN 0x4100A0
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN 0x4100A4
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_SAT 0x4100A8
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT 0x4100AC
+
+#define mmDMA_QM_2_CQ_CFG0 0x4100B0
+
+#define mmDMA_QM_2_CQ_CFG1 0x4100B4
+
+#define mmDMA_QM_2_CQ_ARUSER 0x4100B8
+
+#define mmDMA_QM_2_CQ_PTR_LO 0x4100C0
+
+#define mmDMA_QM_2_CQ_PTR_HI 0x4100C4
+
+#define mmDMA_QM_2_CQ_TSIZE 0x4100C8
+
+#define mmDMA_QM_2_CQ_CTL 0x4100CC
+
+#define mmDMA_QM_2_CQ_PTR_LO_STS 0x4100D4
+
+#define mmDMA_QM_2_CQ_PTR_HI_STS 0x4100D8
+
+#define mmDMA_QM_2_CQ_TSIZE_STS 0x4100DC
+
+#define mmDMA_QM_2_CQ_CTL_STS 0x4100E0
+
+#define mmDMA_QM_2_CQ_STS0 0x4100E4
+
+#define mmDMA_QM_2_CQ_STS1 0x4100E8
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_EN 0x4100F0
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN 0x4100F4
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_SAT 0x4100F8
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT 0x4100FC
+
+#define mmDMA_QM_2_CQ_IFIFO_CNT 0x410108
+
+#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO 0x410120
+
+#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI 0x410124
+
+#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO 0x410128
+
+#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI 0x41012C
+
+#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO 0x410130
+
+#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI 0x410134
+
+#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO 0x410138
+
+#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI 0x41013C
+
+#define mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET 0x410140
+
+#define mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET 0x410144
+
+#define mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET 0x410148
+
+#define mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET 0x41014C
+
+#define mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET 0x410150
+
+#define mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET 0x410154
+
+#define mmDMA_QM_2_CP_FENCE0_RDATA 0x410158
+
+#define mmDMA_QM_2_CP_FENCE1_RDATA 0x41015C
+
+#define mmDMA_QM_2_CP_FENCE2_RDATA 0x410160
+
+#define mmDMA_QM_2_CP_FENCE3_RDATA 0x410164
+
+#define mmDMA_QM_2_CP_FENCE0_CNT 0x410168
+
+#define mmDMA_QM_2_CP_FENCE1_CNT 0x41016C
+
+#define mmDMA_QM_2_CP_FENCE2_CNT 0x410170
+
+#define mmDMA_QM_2_CP_FENCE3_CNT 0x410174
+
+#define mmDMA_QM_2_CP_STS 0x410178
+
+#define mmDMA_QM_2_CP_CURRENT_INST_LO 0x41017C
+
+#define mmDMA_QM_2_CP_CURRENT_INST_HI 0x410180
+
+#define mmDMA_QM_2_CP_BARRIER_CFG 0x410184
+
+#define mmDMA_QM_2_CP_DBG_0 0x410188
+
+#define mmDMA_QM_2_PQ_BUF_ADDR 0x410300
+
+#define mmDMA_QM_2_PQ_BUF_RDATA 0x410304
+
+#define mmDMA_QM_2_CQ_BUF_ADDR 0x410308
+
+#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C
+
+#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
new file mode 100644
index 000000000000..53e3cd78a06b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_3_REGS_H_
+#define ASIC_REG_DMA_QM_3_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_3 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_3_GLBL_CFG0 0x418000
+
+#define mmDMA_QM_3_GLBL_CFG1 0x418004
+
+#define mmDMA_QM_3_GLBL_PROT 0x418008
+
+#define mmDMA_QM_3_GLBL_ERR_CFG 0x41800C
+
+#define mmDMA_QM_3_GLBL_ERR_ADDR_LO 0x418010
+
+#define mmDMA_QM_3_GLBL_ERR_ADDR_HI 0x418014
+
+#define mmDMA_QM_3_GLBL_ERR_WDATA 0x418018
+
+#define mmDMA_QM_3_GLBL_SECURE_PROPS 0x41801C
+
+#define mmDMA_QM_3_GLBL_NON_SECURE_PROPS 0x418020
+
+#define mmDMA_QM_3_GLBL_STS0 0x418024
+
+#define mmDMA_QM_3_GLBL_STS1 0x418028
+
+#define mmDMA_QM_3_PQ_BASE_LO 0x418060
+
+#define mmDMA_QM_3_PQ_BASE_HI 0x418064
+
+#define mmDMA_QM_3_PQ_SIZE 0x418068
+
+#define mmDMA_QM_3_PQ_PI 0x41806C
+
+#define mmDMA_QM_3_PQ_CI 0x418070
+
+#define mmDMA_QM_3_PQ_CFG0 0x418074
+
+#define mmDMA_QM_3_PQ_CFG1 0x418078
+
+#define mmDMA_QM_3_PQ_ARUSER 0x41807C
+
+#define mmDMA_QM_3_PQ_PUSH0 0x418080
+
+#define mmDMA_QM_3_PQ_PUSH1 0x418084
+
+#define mmDMA_QM_3_PQ_PUSH2 0x418088
+
+#define mmDMA_QM_3_PQ_PUSH3 0x41808C
+
+#define mmDMA_QM_3_PQ_STS0 0x418090
+
+#define mmDMA_QM_3_PQ_STS1 0x418094
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_EN 0x4180A0
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN 0x4180A4
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_SAT 0x4180A8
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT 0x4180AC
+
+#define mmDMA_QM_3_CQ_CFG0 0x4180B0
+
+#define mmDMA_QM_3_CQ_CFG1 0x4180B4
+
+#define mmDMA_QM_3_CQ_ARUSER 0x4180B8
+
+#define mmDMA_QM_3_CQ_PTR_LO 0x4180C0
+
+#define mmDMA_QM_3_CQ_PTR_HI 0x4180C4
+
+#define mmDMA_QM_3_CQ_TSIZE 0x4180C8
+
+#define mmDMA_QM_3_CQ_CTL 0x4180CC
+
+#define mmDMA_QM_3_CQ_PTR_LO_STS 0x4180D4
+
+#define mmDMA_QM_3_CQ_PTR_HI_STS 0x4180D8
+
+#define mmDMA_QM_3_CQ_TSIZE_STS 0x4180DC
+
+#define mmDMA_QM_3_CQ_CTL_STS 0x4180E0
+
+#define mmDMA_QM_3_CQ_STS0 0x4180E4
+
+#define mmDMA_QM_3_CQ_STS1 0x4180E8
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_EN 0x4180F0
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN 0x4180F4
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_SAT 0x4180F8
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT 0x4180FC
+
+#define mmDMA_QM_3_CQ_IFIFO_CNT 0x418108
+
+#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO 0x418120
+
+#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI 0x418124
+
+#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO 0x418128
+
+#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI 0x41812C
+
+#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO 0x418130
+
+#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI 0x418134
+
+#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO 0x418138
+
+#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI 0x41813C
+
+#define mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET 0x418140
+
+#define mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET 0x418144
+
+#define mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET 0x418148
+
+#define mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET 0x41814C
+
+#define mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET 0x418150
+
+#define mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET 0x418154
+
+#define mmDMA_QM_3_CP_FENCE0_RDATA 0x418158
+
+#define mmDMA_QM_3_CP_FENCE1_RDATA 0x41815C
+
+#define mmDMA_QM_3_CP_FENCE2_RDATA 0x418160
+
+#define mmDMA_QM_3_CP_FENCE3_RDATA 0x418164
+
+#define mmDMA_QM_3_CP_FENCE0_CNT 0x418168
+
+#define mmDMA_QM_3_CP_FENCE1_CNT 0x41816C
+
+#define mmDMA_QM_3_CP_FENCE2_CNT 0x418170
+
+#define mmDMA_QM_3_CP_FENCE3_CNT 0x418174
+
+#define mmDMA_QM_3_CP_STS 0x418178
+
+#define mmDMA_QM_3_CP_CURRENT_INST_LO 0x41817C
+
+#define mmDMA_QM_3_CP_CURRENT_INST_HI 0x418180
+
+#define mmDMA_QM_3_CP_BARRIER_CFG 0x418184
+
+#define mmDMA_QM_3_CP_DBG_0 0x418188
+
+#define mmDMA_QM_3_PQ_BUF_ADDR 0x418300
+
+#define mmDMA_QM_3_PQ_BUF_RDATA 0x418304
+
+#define mmDMA_QM_3_CQ_BUF_ADDR 0x418308
+
+#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C
+
+#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
new file mode 100644
index 000000000000..e0eb5f260201
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_4_REGS_H_
+#define ASIC_REG_DMA_QM_4_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_4 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_4_GLBL_CFG0 0x420000
+
+#define mmDMA_QM_4_GLBL_CFG1 0x420004
+
+#define mmDMA_QM_4_GLBL_PROT 0x420008
+
+#define mmDMA_QM_4_GLBL_ERR_CFG 0x42000C
+
+#define mmDMA_QM_4_GLBL_ERR_ADDR_LO 0x420010
+
+#define mmDMA_QM_4_GLBL_ERR_ADDR_HI 0x420014
+
+#define mmDMA_QM_4_GLBL_ERR_WDATA 0x420018
+
+#define mmDMA_QM_4_GLBL_SECURE_PROPS 0x42001C
+
+#define mmDMA_QM_4_GLBL_NON_SECURE_PROPS 0x420020
+
+#define mmDMA_QM_4_GLBL_STS0 0x420024
+
+#define mmDMA_QM_4_GLBL_STS1 0x420028
+
+#define mmDMA_QM_4_PQ_BASE_LO 0x420060
+
+#define mmDMA_QM_4_PQ_BASE_HI 0x420064
+
+#define mmDMA_QM_4_PQ_SIZE 0x420068
+
+#define mmDMA_QM_4_PQ_PI 0x42006C
+
+#define mmDMA_QM_4_PQ_CI 0x420070
+
+#define mmDMA_QM_4_PQ_CFG0 0x420074
+
+#define mmDMA_QM_4_PQ_CFG1 0x420078
+
+#define mmDMA_QM_4_PQ_ARUSER 0x42007C
+
+#define mmDMA_QM_4_PQ_PUSH0 0x420080
+
+#define mmDMA_QM_4_PQ_PUSH1 0x420084
+
+#define mmDMA_QM_4_PQ_PUSH2 0x420088
+
+#define mmDMA_QM_4_PQ_PUSH3 0x42008C
+
+#define mmDMA_QM_4_PQ_STS0 0x420090
+
+#define mmDMA_QM_4_PQ_STS1 0x420094
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_EN 0x4200A0
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN 0x4200A4
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_SAT 0x4200A8
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT 0x4200AC
+
+#define mmDMA_QM_4_CQ_CFG0 0x4200B0
+
+#define mmDMA_QM_4_CQ_CFG1 0x4200B4
+
+#define mmDMA_QM_4_CQ_ARUSER 0x4200B8
+
+#define mmDMA_QM_4_CQ_PTR_LO 0x4200C0
+
+#define mmDMA_QM_4_CQ_PTR_HI 0x4200C4
+
+#define mmDMA_QM_4_CQ_TSIZE 0x4200C8
+
+#define mmDMA_QM_4_CQ_CTL 0x4200CC
+
+#define mmDMA_QM_4_CQ_PTR_LO_STS 0x4200D4
+
+#define mmDMA_QM_4_CQ_PTR_HI_STS 0x4200D8
+
+#define mmDMA_QM_4_CQ_TSIZE_STS 0x4200DC
+
+#define mmDMA_QM_4_CQ_CTL_STS 0x4200E0
+
+#define mmDMA_QM_4_CQ_STS0 0x4200E4
+
+#define mmDMA_QM_4_CQ_STS1 0x4200E8
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_EN 0x4200F0
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN 0x4200F4
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_SAT 0x4200F8
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT 0x4200FC
+
+#define mmDMA_QM_4_CQ_IFIFO_CNT 0x420108
+
+#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO 0x420120
+
+#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI 0x420124
+
+#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO 0x420128
+
+#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI 0x42012C
+
+#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO 0x420130
+
+#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI 0x420134
+
+#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO 0x420138
+
+#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI 0x42013C
+
+#define mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET 0x420140
+
+#define mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET 0x420144
+
+#define mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET 0x420148
+
+#define mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET 0x42014C
+
+#define mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET 0x420150
+
+#define mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET 0x420154
+
+#define mmDMA_QM_4_CP_FENCE0_RDATA 0x420158
+
+#define mmDMA_QM_4_CP_FENCE1_RDATA 0x42015C
+
+#define mmDMA_QM_4_CP_FENCE2_RDATA 0x420160
+
+#define mmDMA_QM_4_CP_FENCE3_RDATA 0x420164
+
+#define mmDMA_QM_4_CP_FENCE0_CNT 0x420168
+
+#define mmDMA_QM_4_CP_FENCE1_CNT 0x42016C
+
+#define mmDMA_QM_4_CP_FENCE2_CNT 0x420170
+
+#define mmDMA_QM_4_CP_FENCE3_CNT 0x420174
+
+#define mmDMA_QM_4_CP_STS 0x420178
+
+#define mmDMA_QM_4_CP_CURRENT_INST_LO 0x42017C
+
+#define mmDMA_QM_4_CP_CURRENT_INST_HI 0x420180
+
+#define mmDMA_QM_4_CP_BARRIER_CFG 0x420184
+
+#define mmDMA_QM_4_CP_DBG_0 0x420188
+
+#define mmDMA_QM_4_PQ_BUF_ADDR 0x420300
+
+#define mmDMA_QM_4_PQ_BUF_RDATA 0x420304
+
+#define mmDMA_QM_4_CQ_BUF_ADDR 0x420308
+
+#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C
+
+#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
new file mode 100644
index 000000000000..85b15010cd7a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
@@ -0,0 +1,1372 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef GOYA_BLOCKS_H_
+#define GOYA_BLOCKS_H_
+
+#define mmPCI_NRTR_BASE 0x7FFC000000ull
+#define PCI_NRTR_MAX_OFFSET 0x608
+#define PCI_NRTR_SECTION 0x4000
+#define mmPCI_RD_REGULATOR_BASE 0x7FFC004000ull
+#define PCI_RD_REGULATOR_MAX_OFFSET 0x74
+#define PCI_RD_REGULATOR_SECTION 0x1000
+#define mmPCI_WR_REGULATOR_BASE 0x7FFC005000ull
+#define PCI_WR_REGULATOR_MAX_OFFSET 0x74
+#define PCI_WR_REGULATOR_SECTION 0x3B000
+#define mmMME1_RTR_BASE 0x7FFC040000ull
+#define MME1_RTR_MAX_OFFSET 0x608
+#define MME1_RTR_SECTION 0x4000
+#define mmMME1_RD_REGULATOR_BASE 0x7FFC044000ull
+#define MME1_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME1_RD_REGULATOR_SECTION 0x1000
+#define mmMME1_WR_REGULATOR_BASE 0x7FFC045000ull
+#define MME1_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME1_WR_REGULATOR_SECTION 0x3B000
+#define mmMME2_RTR_BASE 0x7FFC080000ull
+#define MME2_RTR_MAX_OFFSET 0x608
+#define MME2_RTR_SECTION 0x4000
+#define mmMME2_RD_REGULATOR_BASE 0x7FFC084000ull
+#define MME2_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME2_RD_REGULATOR_SECTION 0x1000
+#define mmMME2_WR_REGULATOR_BASE 0x7FFC085000ull
+#define MME2_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME2_WR_REGULATOR_SECTION 0x3B000
+#define mmMME3_RTR_BASE 0x7FFC0C0000ull
+#define MME3_RTR_MAX_OFFSET 0x608
+#define MME3_RTR_SECTION 0x4000
+#define mmMME3_RD_REGULATOR_BASE 0x7FFC0C4000ull
+#define MME3_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME3_RD_REGULATOR_SECTION 0x1000
+#define mmMME3_WR_REGULATOR_BASE 0x7FFC0C5000ull
+#define MME3_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME3_WR_REGULATOR_SECTION 0xB000
+#define mmMME_BASE 0x7FFC0D0000ull
+#define MME_MAX_OFFSET 0xBB0
+#define MME_SECTION 0x8000
+#define mmMME_QM_BASE 0x7FFC0D8000ull
+#define MME_QM_MAX_OFFSET 0x310
+#define MME_QM_SECTION 0x1000
+#define mmMME_CMDQ_BASE 0x7FFC0D9000ull
+#define MME_CMDQ_MAX_OFFSET 0x310
+#define MME_CMDQ_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_0_BASE 0x7FFC0DA000ull
+#define ACC_MS_ECC_MEM_0_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_0_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_1_BASE 0x7FFC0DB000ull
+#define ACC_MS_ECC_MEM_1_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_1_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_2_BASE 0x7FFC0DC000ull
+#define ACC_MS_ECC_MEM_2_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_2_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_3_BASE 0x7FFC0DD000ull
+#define ACC_MS_ECC_MEM_3_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_3_SECTION 0x1000
+#define mmSBA_ECC_MEM_BASE 0x7FFC0DE000ull
+#define SBA_ECC_MEM_MAX_OFFSET 0x0
+#define SBA_ECC_MEM_SECTION 0x1000
+#define mmSBB_ECC_MEM_BASE 0x7FFC0DF000ull
+#define SBB_ECC_MEM_MAX_OFFSET 0x0
+#define SBB_ECC_MEM_SECTION 0x21000
+#define mmMME4_RTR_BASE 0x7FFC100000ull
+#define MME4_RTR_MAX_OFFSET 0x608
+#define MME4_RTR_SECTION 0x4000
+#define mmMME4_RD_REGULATOR_BASE 0x7FFC104000ull
+#define MME4_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME4_RD_REGULATOR_SECTION 0x1000
+#define mmMME4_WR_REGULATOR_BASE 0x7FFC105000ull
+#define MME4_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME4_WR_REGULATOR_SECTION 0xB000
+#define mmSYNC_MNGR_BASE 0x7FFC110000ull
+#define SYNC_MNGR_MAX_OFFSET 0x4400
+#define SYNC_MNGR_SECTION 0x30000
+#define mmMME5_RTR_BASE 0x7FFC140000ull
+#define MME5_RTR_MAX_OFFSET 0x608
+#define MME5_RTR_SECTION 0x4000
+#define mmMME5_RD_REGULATOR_BASE 0x7FFC144000ull
+#define MME5_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME5_RD_REGULATOR_SECTION 0x1000
+#define mmMME5_WR_REGULATOR_BASE 0x7FFC145000ull
+#define MME5_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME5_WR_REGULATOR_SECTION 0x3B000
+#define mmMME6_RTR_BASE 0x7FFC180000ull
+#define MME6_RTR_MAX_OFFSET 0x608
+#define MME6_RTR_SECTION 0x4000
+#define mmMME6_RD_REGULATOR_BASE 0x7FFC184000ull
+#define MME6_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME6_RD_REGULATOR_SECTION 0x1000
+#define mmMME6_WR_REGULATOR_BASE 0x7FFC185000ull
+#define MME6_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME6_WR_REGULATOR_SECTION 0x3B000
+#define mmDMA_NRTR_BASE 0x7FFC1C0000ull
+#define DMA_NRTR_MAX_OFFSET 0x608
+#define DMA_NRTR_SECTION 0x4000
+#define mmDMA_RD_REGULATOR_BASE 0x7FFC1C4000ull
+#define DMA_RD_REGULATOR_MAX_OFFSET 0x74
+#define DMA_RD_REGULATOR_SECTION 0x1000
+#define mmDMA_WR_REGULATOR_BASE 0x7FFC1C5000ull
+#define DMA_WR_REGULATOR_MAX_OFFSET 0x74
+#define DMA_WR_REGULATOR_SECTION 0x3B000
+#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull
+#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull
+#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC204000ull
+#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC205000ull
+#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC208000ull
+#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC209000ull
+#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC20C000ull
+#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC20D000ull
+#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC210000ull
+#define SRAM_Y0_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC211000ull
+#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC220000ull
+#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC221000ull
+#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC224000ull
+#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC225000ull
+#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC228000ull
+#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC229000ull
+#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC22C000ull
+#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC22D000ull
+#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC230000ull
+#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC231000ull
+#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC240000ull
+#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC241000ull
+#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC244000ull
+#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC245000ull
+#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC248000ull
+#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC249000ull
+#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC24C000ull
+#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC24D000ull
+#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC250000ull
+#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC251000ull
+#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC260000ull
+#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC261000ull
+#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC264000ull
+#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC265000ull
+#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC268000ull
+#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC269000ull
+#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X3_BANK_BASE 0x7FFC26C000ull
+#define SRAM_Y3_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC26D000ull
+#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC270000ull
+#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC271000ull
+#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y4_X0_BANK_BASE 0x7FFC280000ull
+#define SRAM_Y4_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X0_RTR_BASE 0x7FFC281000ull
+#define SRAM_Y4_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X1_BANK_BASE 0x7FFC284000ull
+#define SRAM_Y4_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X1_RTR_BASE 0x7FFC285000ull
+#define SRAM_Y4_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X2_BANK_BASE 0x7FFC288000ull
+#define SRAM_Y4_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X2_RTR_BASE 0x7FFC289000ull
+#define SRAM_Y4_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X3_BANK_BASE 0x7FFC28C000ull
+#define SRAM_Y4_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X3_RTR_BASE 0x7FFC28D000ull
+#define SRAM_Y4_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X4_BANK_BASE 0x7FFC290000ull
+#define SRAM_Y4_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X4_RTR_BASE 0x7FFC291000ull
+#define SRAM_Y4_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y5_X0_BANK_BASE 0x7FFC2A0000ull
+#define SRAM_Y5_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X0_RTR_BASE 0x7FFC2A1000ull
+#define SRAM_Y5_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X1_BANK_BASE 0x7FFC2A4000ull
+#define SRAM_Y5_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X1_RTR_BASE 0x7FFC2A5000ull
+#define SRAM_Y5_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X2_BANK_BASE 0x7FFC2A8000ull
+#define SRAM_Y5_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X2_RTR_BASE 0x7FFC2A9000ull
+#define SRAM_Y5_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X3_BANK_BASE 0x7FFC2AC000ull
+#define SRAM_Y5_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X3_RTR_BASE 0x7FFC2AD000ull
+#define SRAM_Y5_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X4_BANK_BASE 0x7FFC2B0000ull
+#define SRAM_Y5_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X4_RTR_BASE 0x7FFC2B1000ull
+#define SRAM_Y5_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X4_RTR_SECTION 0x14F000
+#define mmDMA_QM_0_BASE 0x7FFC400000ull
+#define DMA_QM_0_MAX_OFFSET 0x310
+#define DMA_QM_0_SECTION 0x1000
+#define mmDMA_CH_0_BASE 0x7FFC401000ull
+#define DMA_CH_0_MAX_OFFSET 0x200
+#define DMA_CH_0_SECTION 0x7000
+#define mmDMA_QM_1_BASE 0x7FFC408000ull
+#define DMA_QM_1_MAX_OFFSET 0x310
+#define DMA_QM_1_SECTION 0x1000
+#define mmDMA_CH_1_BASE 0x7FFC409000ull
+#define DMA_CH_1_MAX_OFFSET 0x200
+#define DMA_CH_1_SECTION 0x7000
+#define mmDMA_QM_2_BASE 0x7FFC410000ull
+#define DMA_QM_2_MAX_OFFSET 0x310
+#define DMA_QM_2_SECTION 0x1000
+#define mmDMA_CH_2_BASE 0x7FFC411000ull
+#define DMA_CH_2_MAX_OFFSET 0x200
+#define DMA_CH_2_SECTION 0x7000
+#define mmDMA_QM_3_BASE 0x7FFC418000ull
+#define DMA_QM_3_MAX_OFFSET 0x310
+#define DMA_QM_3_SECTION 0x1000
+#define mmDMA_CH_3_BASE 0x7FFC419000ull
+#define DMA_CH_3_MAX_OFFSET 0x200
+#define DMA_CH_3_SECTION 0x7000
+#define mmDMA_QM_4_BASE 0x7FFC420000ull
+#define DMA_QM_4_MAX_OFFSET 0x310
+#define DMA_QM_4_SECTION 0x1000
+#define mmDMA_CH_4_BASE 0x7FFC421000ull
+#define DMA_CH_4_MAX_OFFSET 0x200
+#define DMA_CH_4_SECTION 0x20000
+#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull
+#define CPU_CA53_CFG_MAX_OFFSET 0x218
+#define CPU_CA53_CFG_SECTION 0x1000
+#define mmCPU_IF_BASE 0x7FFC442000ull
+#define CPU_IF_MAX_OFFSET 0x134
+#define CPU_IF_SECTION 0x2000
+#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull
+#define CPU_TIMESTAMP_MAX_OFFSET 0x1000
+#define CPU_TIMESTAMP_SECTION 0x3C000
+#define mmMMU_BASE 0x7FFC480000ull
+#define MMU_MAX_OFFSET 0x44
+#define MMU_SECTION 0x10000
+#define mmSTLB_BASE 0x7FFC490000ull
+#define STLB_MAX_OFFSET 0x50
+#define STLB_SECTION 0x10000
+#define mmNORTH_THERMAL_SENSOR_BASE 0x7FFC4A0000ull
+#define NORTH_THERMAL_SENSOR_MAX_OFFSET 0xE64
+#define NORTH_THERMAL_SENSOR_SECTION 0x1000
+#define mmMC_PLL_BASE 0x7FFC4A1000ull
+#define MC_PLL_MAX_OFFSET 0x444
+#define MC_PLL_SECTION 0x1000
+#define mmCPU_PLL_BASE 0x7FFC4A2000ull
+#define CPU_PLL_MAX_OFFSET 0x444
+#define CPU_PLL_SECTION 0x1000
+#define mmIC_PLL_BASE 0x7FFC4A3000ull
+#define IC_PLL_MAX_OFFSET 0x444
+#define IC_PLL_SECTION 0x1000
+#define mmDMA_PROCESS_MON_BASE 0x7FFC4A4000ull
+#define DMA_PROCESS_MON_MAX_OFFSET 0x4
+#define DMA_PROCESS_MON_SECTION 0xC000
+#define mmDMA_MACRO_BASE 0x7FFC4B0000ull
+#define DMA_MACRO_MAX_OFFSET 0x15C
+#define DMA_MACRO_SECTION 0x150000
+#define mmDDR_PHY_CH0_BASE 0x7FFC600000ull
+#define DDR_PHY_CH0_MAX_OFFSET 0x0
+#define DDR_PHY_CH0_SECTION 0x40000
+#define mmDDR_MC_CH0_BASE 0x7FFC640000ull
+#define DDR_MC_CH0_MAX_OFFSET 0xF34
+#define DDR_MC_CH0_SECTION 0x8000
+#define mmDDR_MISC_CH0_BASE 0x7FFC648000ull
+#define DDR_MISC_CH0_MAX_OFFSET 0x204
+#define DDR_MISC_CH0_SECTION 0xB8000
+#define mmDDR_PHY_CH1_BASE 0x7FFC700000ull
+#define DDR_PHY_CH1_MAX_OFFSET 0x0
+#define DDR_PHY_CH1_SECTION 0x40000
+#define mmDDR_MC_CH1_BASE 0x7FFC740000ull
+#define DDR_MC_CH1_MAX_OFFSET 0xF34
+#define DDR_MC_CH1_SECTION 0x8000
+#define mmDDR_MISC_CH1_BASE 0x7FFC748000ull
+#define DDR_MISC_CH1_MAX_OFFSET 0x204
+#define DDR_MISC_CH1_SECTION 0xB8000
+#define mmGIC_BASE 0x7FFC800000ull
+#define GIC_MAX_OFFSET 0x10000
+#define GIC_SECTION 0x401000
+#define mmPCIE_WRAP_BASE 0x7FFCC01000ull
+#define PCIE_WRAP_MAX_OFFSET 0xDF4
+#define PCIE_WRAP_SECTION 0x1000
+#define mmPCIE_DBI_BASE 0x7FFCC02000ull
+#define PCIE_DBI_MAX_OFFSET 0xC04
+#define PCIE_DBI_SECTION 0x2000
+#define mmPCIE_CORE_BASE 0x7FFCC04000ull
+#define PCIE_CORE_MAX_OFFSET 0x9B8
+#define PCIE_CORE_SECTION 0x1000
+#define mmPCIE_DB_CFG_BASE 0x7FFCC05000ull
+#define PCIE_DB_CFG_MAX_OFFSET 0xE34
+#define PCIE_DB_CFG_SECTION 0x1000
+#define mmPCIE_DB_CMD_BASE 0x7FFCC06000ull
+#define PCIE_DB_CMD_MAX_OFFSET 0x810
+#define PCIE_DB_CMD_SECTION 0x1000
+#define mmPCIE_AUX_BASE 0x7FFCC07000ull
+#define PCIE_AUX_MAX_OFFSET 0x9BC
+#define PCIE_AUX_SECTION 0x1000
+#define mmPCIE_DB_RSV_BASE 0x7FFCC08000ull
+#define PCIE_DB_RSV_MAX_OFFSET 0x800
+#define PCIE_DB_RSV_SECTION 0x8000
+#define mmPCIE_PHY_BASE 0x7FFCC10000ull
+#define PCIE_PHY_MAX_OFFSET 0x924
+#define PCIE_PHY_SECTION 0x30000
+#define mmPSOC_I2C_M0_BASE 0x7FFCC40000ull
+#define PSOC_I2C_M0_MAX_OFFSET 0x100
+#define PSOC_I2C_M0_SECTION 0x1000
+#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull
+#define PSOC_I2C_M1_MAX_OFFSET 0x100
+#define PSOC_I2C_M1_SECTION 0x1000
+#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull
+#define PSOC_I2C_S_MAX_OFFSET 0x100
+#define PSOC_I2C_S_SECTION 0x1000
+#define mmPSOC_SPI_BASE 0x7FFCC43000ull
+#define PSOC_SPI_MAX_OFFSET 0x100
+#define PSOC_SPI_SECTION 0x1000
+#define mmPSOC_EMMC_BASE 0x7FFCC44000ull
+#define PSOC_EMMC_MAX_OFFSET 0xF70
+#define PSOC_EMMC_SECTION 0x1000
+#define mmPSOC_UART_0_BASE 0x7FFCC45000ull
+#define PSOC_UART_0_MAX_OFFSET 0x1000
+#define PSOC_UART_0_SECTION 0x1000
+#define mmPSOC_UART_1_BASE 0x7FFCC46000ull
+#define PSOC_UART_1_MAX_OFFSET 0x1000
+#define PSOC_UART_1_SECTION 0x1000
+#define mmPSOC_TIMER_BASE 0x7FFCC47000ull
+#define PSOC_TIMER_MAX_OFFSET 0x1000
+#define PSOC_TIMER_SECTION 0x1000
+#define mmPSOC_WDOG_BASE 0x7FFCC48000ull
+#define PSOC_WDOG_MAX_OFFSET 0x1000
+#define PSOC_WDOG_SECTION 0x1000
+#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull
+#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000
+#define PSOC_TIMESTAMP_SECTION 0x1000
+#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull
+#define PSOC_EFUSE_MAX_OFFSET 0x10C
+#define PSOC_EFUSE_SECTION 0x1000
+#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull
+#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xA48
+#define PSOC_GLOBAL_CONF_SECTION 0x1000
+#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull
+#define PSOC_GPIO0_MAX_OFFSET 0x1000
+#define PSOC_GPIO0_SECTION 0x1000
+#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull
+#define PSOC_GPIO1_MAX_OFFSET 0x1000
+#define PSOC_GPIO1_SECTION 0x1000
+#define mmPSOC_BTL_BASE 0x7FFCC4E000ull
+#define PSOC_BTL_MAX_OFFSET 0x124
+#define PSOC_BTL_SECTION 0x1000
+#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull
+#define PSOC_CS_TRACE_MAX_OFFSET 0x0
+#define PSOC_CS_TRACE_SECTION 0x1000
+#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull
+#define PSOC_GPIO2_MAX_OFFSET 0x1000
+#define PSOC_GPIO2_SECTION 0x1000
+#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull
+#define PSOC_GPIO3_MAX_OFFSET 0x1000
+#define PSOC_GPIO3_SECTION 0x1000
+#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull
+#define PSOC_GPIO4_MAX_OFFSET 0x1000
+#define PSOC_GPIO4_SECTION 0x1000
+#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull
+#define PSOC_DFT_EFUSE_MAX_OFFSET 0x10C
+#define PSOC_DFT_EFUSE_SECTION 0x1000
+#define mmPSOC_PM_BASE 0x7FFCC54000ull
+#define PSOC_PM_MAX_OFFSET 0x4
+#define PSOC_PM_SECTION 0x1000
+#define mmPSOC_TS_BASE 0x7FFCC55000ull
+#define PSOC_TS_MAX_OFFSET 0xE64
+#define PSOC_TS_SECTION 0xB000
+#define mmPSOC_MII_BASE 0x7FFCC60000ull
+#define PSOC_MII_MAX_OFFSET 0x105C
+#define PSOC_MII_SECTION 0x10000
+#define mmPSOC_EMMC_PLL_BASE 0x7FFCC70000ull
+#define PSOC_EMMC_PLL_MAX_OFFSET 0x444
+#define PSOC_EMMC_PLL_SECTION 0x1000
+#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull
+#define PSOC_MME_PLL_MAX_OFFSET 0x444
+#define PSOC_MME_PLL_SECTION 0x1000
+#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull
+#define PSOC_PCI_PLL_MAX_OFFSET 0x444
+#define PSOC_PCI_PLL_SECTION 0x6000
+#define mmPSOC_PWM0_BASE 0x7FFCC78000ull
+#define PSOC_PWM0_MAX_OFFSET 0x58
+#define PSOC_PWM0_SECTION 0x1000
+#define mmPSOC_PWM1_BASE 0x7FFCC79000ull
+#define PSOC_PWM1_MAX_OFFSET 0x58
+#define PSOC_PWM1_SECTION 0x1000
+#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull
+#define PSOC_PWM2_MAX_OFFSET 0x58
+#define PSOC_PWM2_SECTION 0x1000
+#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull
+#define PSOC_PWM3_MAX_OFFSET 0x58
+#define PSOC_PWM3_SECTION 0x185000
+#define mmTPC0_NRTR_BASE 0x7FFCE00000ull
+#define TPC0_NRTR_MAX_OFFSET 0x608
+#define TPC0_NRTR_SECTION 0x1000
+#define mmTPC_PLL_BASE 0x7FFCE01000ull
+#define TPC_PLL_MAX_OFFSET 0x444
+#define TPC_PLL_SECTION 0x1000
+#define mmTPC_THEMAL_SENSOR_BASE 0x7FFCE02000ull
+#define TPC_THEMAL_SENSOR_MAX_OFFSET 0xE64
+#define TPC_THEMAL_SENSOR_SECTION 0x1000
+#define mmTPC_PROCESS_MON_BASE 0x7FFCE03000ull
+#define TPC_PROCESS_MON_MAX_OFFSET 0x4
+#define TPC_PROCESS_MON_SECTION 0x1000
+#define mmTPC0_RD_REGULATOR_BASE 0x7FFCE04000ull
+#define TPC0_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC0_RD_REGULATOR_SECTION 0x1000
+#define mmTPC0_WR_REGULATOR_BASE 0x7FFCE05000ull
+#define TPC0_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC0_WR_REGULATOR_SECTION 0x1000
+#define mmTPC0_CFG_BASE 0x7FFCE06000ull
+#define TPC0_CFG_MAX_OFFSET 0xE30
+#define TPC0_CFG_SECTION 0x2000
+#define mmTPC0_QM_BASE 0x7FFCE08000ull
+#define TPC0_QM_MAX_OFFSET 0x310
+#define TPC0_QM_SECTION 0x1000
+#define mmTPC0_CMDQ_BASE 0x7FFCE09000ull
+#define TPC0_CMDQ_MAX_OFFSET 0x310
+#define TPC0_CMDQ_SECTION 0x37000
+#define mmTPC1_RTR_BASE 0x7FFCE40000ull
+#define TPC1_RTR_MAX_OFFSET 0x608
+#define TPC1_RTR_SECTION 0x4000
+#define mmTPC1_WR_REGULATOR_BASE 0x7FFCE44000ull
+#define TPC1_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC1_WR_REGULATOR_SECTION 0x1000
+#define mmTPC1_RD_REGULATOR_BASE 0x7FFCE45000ull
+#define TPC1_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC1_RD_REGULATOR_SECTION 0x1000
+#define mmTPC1_CFG_BASE 0x7FFCE46000ull
+#define TPC1_CFG_MAX_OFFSET 0xE30
+#define TPC1_CFG_SECTION 0x2000
+#define mmTPC1_QM_BASE 0x7FFCE48000ull
+#define TPC1_QM_MAX_OFFSET 0x310
+#define TPC1_QM_SECTION 0x1000
+#define mmTPC1_CMDQ_BASE 0x7FFCE49000ull
+#define TPC1_CMDQ_MAX_OFFSET 0x310
+#define TPC1_CMDQ_SECTION 0x37000
+#define mmTPC2_RTR_BASE 0x7FFCE80000ull
+#define TPC2_RTR_MAX_OFFSET 0x608
+#define TPC2_RTR_SECTION 0x4000
+#define mmTPC2_RD_REGULATOR_BASE 0x7FFCE84000ull
+#define TPC2_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC2_RD_REGULATOR_SECTION 0x1000
+#define mmTPC2_WR_REGULATOR_BASE 0x7FFCE85000ull
+#define TPC2_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC2_WR_REGULATOR_SECTION 0x1000
+#define mmTPC2_CFG_BASE 0x7FFCE86000ull
+#define TPC2_CFG_MAX_OFFSET 0xE30
+#define TPC2_CFG_SECTION 0x2000
+#define mmTPC2_QM_BASE 0x7FFCE88000ull
+#define TPC2_QM_MAX_OFFSET 0x310
+#define TPC2_QM_SECTION 0x1000
+#define mmTPC2_CMDQ_BASE 0x7FFCE89000ull
+#define TPC2_CMDQ_MAX_OFFSET 0x310
+#define TPC2_CMDQ_SECTION 0x37000
+#define mmTPC3_RTR_BASE 0x7FFCEC0000ull
+#define TPC3_RTR_MAX_OFFSET 0x608
+#define TPC3_RTR_SECTION 0x4000
+#define mmTPC3_RD_REGULATOR_BASE 0x7FFCEC4000ull
+#define TPC3_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC3_RD_REGULATOR_SECTION 0x1000
+#define mmTPC3_WR_REGULATOR_BASE 0x7FFCEC5000ull
+#define TPC3_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC3_WR_REGULATOR_SECTION 0x1000
+#define mmTPC3_CFG_BASE 0x7FFCEC6000ull
+#define TPC3_CFG_MAX_OFFSET 0xE30
+#define TPC3_CFG_SECTION 0x2000
+#define mmTPC3_QM_BASE 0x7FFCEC8000ull
+#define TPC3_QM_MAX_OFFSET 0x310
+#define TPC3_QM_SECTION 0x1000
+#define mmTPC3_CMDQ_BASE 0x7FFCEC9000ull
+#define TPC3_CMDQ_MAX_OFFSET 0x310
+#define TPC3_CMDQ_SECTION 0x37000
+#define mmTPC4_RTR_BASE 0x7FFCF00000ull
+#define TPC4_RTR_MAX_OFFSET 0x608
+#define TPC4_RTR_SECTION 0x4000
+#define mmTPC4_RD_REGULATOR_BASE 0x7FFCF04000ull
+#define TPC4_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC4_RD_REGULATOR_SECTION 0x1000
+#define mmTPC4_WR_REGULATOR_BASE 0x7FFCF05000ull
+#define TPC4_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC4_WR_REGULATOR_SECTION 0x1000
+#define mmTPC4_CFG_BASE 0x7FFCF06000ull
+#define TPC4_CFG_MAX_OFFSET 0xE30
+#define TPC4_CFG_SECTION 0x2000
+#define mmTPC4_QM_BASE 0x7FFCF08000ull
+#define TPC4_QM_MAX_OFFSET 0x310
+#define TPC4_QM_SECTION 0x1000
+#define mmTPC4_CMDQ_BASE 0x7FFCF09000ull
+#define TPC4_CMDQ_MAX_OFFSET 0x310
+#define TPC4_CMDQ_SECTION 0x37000
+#define mmTPC5_RTR_BASE 0x7FFCF40000ull
+#define TPC5_RTR_MAX_OFFSET 0x608
+#define TPC5_RTR_SECTION 0x4000
+#define mmTPC5_RD_REGULATOR_BASE 0x7FFCF44000ull
+#define TPC5_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC5_RD_REGULATOR_SECTION 0x1000
+#define mmTPC5_WR_REGULATOR_BASE 0x7FFCF45000ull
+#define TPC5_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC5_WR_REGULATOR_SECTION 0x1000
+#define mmTPC5_CFG_BASE 0x7FFCF46000ull
+#define TPC5_CFG_MAX_OFFSET 0xE30
+#define TPC5_CFG_SECTION 0x2000
+#define mmTPC5_QM_BASE 0x7FFCF48000ull
+#define TPC5_QM_MAX_OFFSET 0x310
+#define TPC5_QM_SECTION 0x1000
+#define mmTPC5_CMDQ_BASE 0x7FFCF49000ull
+#define TPC5_CMDQ_MAX_OFFSET 0x310
+#define TPC5_CMDQ_SECTION 0x37000
+#define mmTPC6_RTR_BASE 0x7FFCF80000ull
+#define TPC6_RTR_MAX_OFFSET 0x608
+#define TPC6_RTR_SECTION 0x4000
+#define mmTPC6_RD_REGULATOR_BASE 0x7FFCF84000ull
+#define TPC6_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC6_RD_REGULATOR_SECTION 0x1000
+#define mmTPC6_WR_REGULATOR_BASE 0x7FFCF85000ull
+#define TPC6_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC6_WR_REGULATOR_SECTION 0x1000
+#define mmTPC6_CFG_BASE 0x7FFCF86000ull
+#define TPC6_CFG_MAX_OFFSET 0xE30
+#define TPC6_CFG_SECTION 0x2000
+#define mmTPC6_QM_BASE 0x7FFCF88000ull
+#define TPC6_QM_MAX_OFFSET 0x310
+#define TPC6_QM_SECTION 0x1000
+#define mmTPC6_CMDQ_BASE 0x7FFCF89000ull
+#define TPC6_CMDQ_MAX_OFFSET 0x310
+#define TPC6_CMDQ_SECTION 0x37000
+#define mmTPC7_NRTR_BASE 0x7FFCFC0000ull
+#define TPC7_NRTR_MAX_OFFSET 0x608
+#define TPC7_NRTR_SECTION 0x4000
+#define mmTPC7_RD_REGULATOR_BASE 0x7FFCFC4000ull
+#define TPC7_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC7_RD_REGULATOR_SECTION 0x1000
+#define mmTPC7_WR_REGULATOR_BASE 0x7FFCFC5000ull
+#define TPC7_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC7_WR_REGULATOR_SECTION 0x1000
+#define mmTPC7_CFG_BASE 0x7FFCFC6000ull
+#define TPC7_CFG_MAX_OFFSET 0xE30
+#define TPC7_CFG_SECTION 0x2000
+#define mmTPC7_QM_BASE 0x7FFCFC8000ull
+#define TPC7_QM_MAX_OFFSET 0x310
+#define TPC7_QM_SECTION 0x1000
+#define mmTPC7_CMDQ_BASE 0x7FFCFC9000ull
+#define TPC7_CMDQ_MAX_OFFSET 0x310
+#define TPC7_CMDQ_SECTION 0x1037000
+#define mmMME_TOP_TABLE_BASE 0x7FFE000000ull
+#define MME_TOP_TABLE_MAX_OFFSET 0x1000
+#define MME_TOP_TABLE_SECTION 0x1000
+#define mmMME0_RTR_FUNNEL_BASE 0x7FFE001000ull
+#define MME0_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME0_RTR_FUNNEL_SECTION 0x40000
+#define mmMME1_RTR_FUNNEL_BASE 0x7FFE041000ull
+#define MME1_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME1_RTR_FUNNEL_SECTION 0x1000
+#define mmMME1_SBA_STM_BASE 0x7FFE042000ull
+#define MME1_SBA_STM_MAX_OFFSET 0x1000
+#define MME1_SBA_STM_SECTION 0x1000
+#define mmMME1_SBA_CTI_BASE 0x7FFE043000ull
+#define MME1_SBA_CTI_MAX_OFFSET 0x1000
+#define MME1_SBA_CTI_SECTION 0x1000
+#define mmMME1_SBA_ETF_BASE 0x7FFE044000ull
+#define MME1_SBA_ETF_MAX_OFFSET 0x1000
+#define MME1_SBA_ETF_SECTION 0x1000
+#define mmMME1_SBA_SPMU_BASE 0x7FFE045000ull
+#define MME1_SBA_SPMU_MAX_OFFSET 0x1000
+#define MME1_SBA_SPMU_SECTION 0x1000
+#define mmMME1_SBA_CTI0_BASE 0x7FFE046000ull
+#define MME1_SBA_CTI0_MAX_OFFSET 0x1000
+#define MME1_SBA_CTI0_SECTION 0x1000
+#define mmMME1_SBA_CTI1_BASE 0x7FFE047000ull
+#define MME1_SBA_CTI1_MAX_OFFSET 0x1000
+#define MME1_SBA_CTI1_SECTION 0x1000
+#define mmMME1_SBA_BMON0_BASE 0x7FFE048000ull
+#define MME1_SBA_BMON0_MAX_OFFSET 0x1000
+#define MME1_SBA_BMON0_SECTION 0x1000
+#define mmMME1_SBA_BMON1_BASE 0x7FFE049000ull
+#define MME1_SBA_BMON1_MAX_OFFSET 0x1000
+#define MME1_SBA_BMON1_SECTION 0x38000
+#define mmMME2_RTR_FUNNEL_BASE 0x7FFE081000ull
+#define MME2_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME2_RTR_FUNNEL_SECTION 0x40000
+#define mmMME3_RTR_FUNNEL_BASE 0x7FFE0C1000ull
+#define MME3_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME3_RTR_FUNNEL_SECTION 0x1000
+#define mmMME3_SBB_STM_BASE 0x7FFE0C2000ull
+#define MME3_SBB_STM_MAX_OFFSET 0x1000
+#define MME3_SBB_STM_SECTION 0x1000
+#define mmMME3_SBB_CTI_BASE 0x7FFE0C3000ull
+#define MME3_SBB_CTI_MAX_OFFSET 0x1000
+#define MME3_SBB_CTI_SECTION 0x1000
+#define mmMME3_SBB_ETF_BASE 0x7FFE0C4000ull
+#define MME3_SBB_ETF_MAX_OFFSET 0x1000
+#define MME3_SBB_ETF_SECTION 0x1000
+#define mmMME3_SBB_SPMU_BASE 0x7FFE0C5000ull
+#define MME3_SBB_SPMU_MAX_OFFSET 0x1000
+#define MME3_SBB_SPMU_SECTION 0x1000
+#define mmMME3_SBB_CTI0_BASE 0x7FFE0C6000ull
+#define MME3_SBB_CTI0_MAX_OFFSET 0x1000
+#define MME3_SBB_CTI0_SECTION 0x1000
+#define mmMME3_SBB_CTI1_BASE 0x7FFE0C7000ull
+#define MME3_SBB_CTI1_MAX_OFFSET 0x1000
+#define MME3_SBB_CTI1_SECTION 0x1000
+#define mmMME3_SBB_BMON0_BASE 0x7FFE0C8000ull
+#define MME3_SBB_BMON0_MAX_OFFSET 0x1000
+#define MME3_SBB_BMON0_SECTION 0x1000
+#define mmMME3_SBB_BMON1_BASE 0x7FFE0C9000ull
+#define MME3_SBB_BMON1_MAX_OFFSET 0x1000
+#define MME3_SBB_BMON1_SECTION 0x38000
+#define mmMME4_RTR_FUNNEL_BASE 0x7FFE101000ull
+#define MME4_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME4_RTR_FUNNEL_SECTION 0x1000
+#define mmMME4_WACS_STM_BASE 0x7FFE102000ull
+#define MME4_WACS_STM_MAX_OFFSET 0x1000
+#define MME4_WACS_STM_SECTION 0x1000
+#define mmMME4_WACS_CTI_BASE 0x7FFE103000ull
+#define MME4_WACS_CTI_MAX_OFFSET 0x1000
+#define MME4_WACS_CTI_SECTION 0x1000
+#define mmMME4_WACS_ETF_BASE 0x7FFE104000ull
+#define MME4_WACS_ETF_MAX_OFFSET 0x1000
+#define MME4_WACS_ETF_SECTION 0x1000
+#define mmMME4_WACS_SPMU_BASE 0x7FFE105000ull
+#define MME4_WACS_SPMU_MAX_OFFSET 0x1000
+#define MME4_WACS_SPMU_SECTION 0x1000
+#define mmMME4_WACS_CTI0_BASE 0x7FFE106000ull
+#define MME4_WACS_CTI0_MAX_OFFSET 0x1000
+#define MME4_WACS_CTI0_SECTION 0x1000
+#define mmMME4_WACS_CTI1_BASE 0x7FFE107000ull
+#define MME4_WACS_CTI1_MAX_OFFSET 0x1000
+#define MME4_WACS_CTI1_SECTION 0x1000
+#define mmMME4_WACS_BMON0_BASE 0x7FFE108000ull
+#define MME4_WACS_BMON0_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON0_SECTION 0x1000
+#define mmMME4_WACS_BMON1_BASE 0x7FFE109000ull
+#define MME4_WACS_BMON1_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON1_SECTION 0x1000
+#define mmMME4_WACS_BMON2_BASE 0x7FFE10A000ull
+#define MME4_WACS_BMON2_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON2_SECTION 0x1000
+#define mmMME4_WACS_BMON3_BASE 0x7FFE10B000ull
+#define MME4_WACS_BMON3_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON3_SECTION 0x1000
+#define mmMME4_WACS_BMON4_BASE 0x7FFE10C000ull
+#define MME4_WACS_BMON4_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON4_SECTION 0x1000
+#define mmMME4_WACS_BMON5_BASE 0x7FFE10D000ull
+#define MME4_WACS_BMON5_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON5_SECTION 0x1000
+#define mmMME4_WACS_BMON6_BASE 0x7FFE10E000ull
+#define MME4_WACS_BMON6_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON6_SECTION 0x4000
+#define mmMME4_WACS2_STM_BASE 0x7FFE112000ull
+#define MME4_WACS2_STM_MAX_OFFSET 0x1000
+#define MME4_WACS2_STM_SECTION 0x1000
+#define mmMME4_WACS2_CTI_BASE 0x7FFE113000ull
+#define MME4_WACS2_CTI_MAX_OFFSET 0x1000
+#define MME4_WACS2_CTI_SECTION 0x1000
+#define mmMME4_WACS2_ETF_BASE 0x7FFE114000ull
+#define MME4_WACS2_ETF_MAX_OFFSET 0x1000
+#define MME4_WACS2_ETF_SECTION 0x1000
+#define mmMME4_WACS2_SPMU_BASE 0x7FFE115000ull
+#define MME4_WACS2_SPMU_MAX_OFFSET 0x1000
+#define MME4_WACS2_SPMU_SECTION 0x1000
+#define mmMME4_WACS2_CTI0_BASE 0x7FFE116000ull
+#define MME4_WACS2_CTI0_MAX_OFFSET 0x1000
+#define MME4_WACS2_CTI0_SECTION 0x1000
+#define mmMME4_WACS2_CTI1_BASE 0x7FFE117000ull
+#define MME4_WACS2_CTI1_MAX_OFFSET 0x1000
+#define MME4_WACS2_CTI1_SECTION 0x1000
+#define mmMME4_WACS2_BMON0_BASE 0x7FFE118000ull
+#define MME4_WACS2_BMON0_MAX_OFFSET 0x1000
+#define MME4_WACS2_BMON0_SECTION 0x1000
+#define mmMME4_WACS2_BMON1_BASE 0x7FFE119000ull
+#define MME4_WACS2_BMON1_MAX_OFFSET 0x1000
+#define MME4_WACS2_BMON1_SECTION 0x1000
+#define mmMME4_WACS2_BMON2_BASE 0x7FFE11A000ull
+#define MME4_WACS2_BMON2_MAX_OFFSET 0x1000
+#define MME4_WACS2_BMON2_SECTION 0x27000
+#define mmMME5_RTR_FUNNEL_BASE 0x7FFE141000ull
+#define MME5_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME5_RTR_FUNNEL_SECTION 0x2BF000
+#define mmDMA_ROM_TABLE_BASE 0x7FFE400000ull
+#define DMA_ROM_TABLE_MAX_OFFSET 0x1000
+#define DMA_ROM_TABLE_SECTION 0x1000
+#define mmDMA_CH_0_CS_STM_BASE 0x7FFE401000ull
+#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_STM_SECTION 0x1000
+#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE402000ull
+#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE403000ull
+#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE404000ull
+#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE405000ull
+#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE406000ull
+#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_0_BMON_0_BASE 0x7FFE407000ull
+#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_0_SECTION 0x1000
+#define mmDMA_CH_0_BMON_1_BASE 0x7FFE408000ull
+#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_1_SECTION 0x9000
+#define mmDMA_CH_1_CS_STM_BASE 0x7FFE411000ull
+#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_STM_SECTION 0x1000
+#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE412000ull
+#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE413000ull
+#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE414000ull
+#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE415000ull
+#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE416000ull
+#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_1_BMON_0_BASE 0x7FFE417000ull
+#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_0_SECTION 0x1000
+#define mmDMA_CH_1_BMON_1_BASE 0x7FFE418000ull
+#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_1_SECTION 0x9000
+#define mmDMA_CH_2_CS_STM_BASE 0x7FFE421000ull
+#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_STM_SECTION 0x1000
+#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE422000ull
+#define DMA_CH_2_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE423000ull
+#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE424000ull
+#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE425000ull
+#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE426000ull
+#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_2_BMON_0_BASE 0x7FFE427000ull
+#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_0_SECTION 0x1000
+#define mmDMA_CH_2_BMON_1_BASE 0x7FFE428000ull
+#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_1_SECTION 0x9000
+#define mmDMA_CH_3_CS_STM_BASE 0x7FFE431000ull
+#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_STM_SECTION 0x1000
+#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE432000ull
+#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE433000ull
+#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE434000ull
+#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE435000ull
+#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE436000ull
+#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_3_BMON_0_BASE 0x7FFE437000ull
+#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_0_SECTION 0x1000
+#define mmDMA_CH_3_BMON_1_BASE 0x7FFE438000ull
+#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_1_SECTION 0x9000
+#define mmDMA_CH_4_CS_STM_BASE 0x7FFE441000ull
+#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_STM_SECTION 0x1000
+#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE442000ull
+#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE443000ull
+#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE444000ull
+#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE445000ull
+#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE446000ull
+#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_4_BMON_0_BASE 0x7FFE447000ull
+#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_0_SECTION 0x1000
+#define mmDMA_CH_4_BMON_1_BASE 0x7FFE448000ull
+#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_1_SECTION 0x8000
+#define mmDMA_CH_FUNNEL_6_1_BASE 0x7FFE450000ull
+#define DMA_CH_FUNNEL_6_1_MAX_OFFSET 0x1000
+#define DMA_CH_FUNNEL_6_1_SECTION 0x11000
+#define mmDMA_MACRO_CS_STM_BASE 0x7FFE461000ull
+#define DMA_MACRO_CS_STM_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_STM_SECTION 0x1000
+#define mmDMA_MACRO_CS_CTI_BASE 0x7FFE462000ull
+#define DMA_MACRO_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_CTI_SECTION 0x1000
+#define mmDMA_MACRO_CS_ETF_BASE 0x7FFE463000ull
+#define DMA_MACRO_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_ETF_SECTION 0x1000
+#define mmDMA_MACRO_CS_SPMU_BASE 0x7FFE464000ull
+#define DMA_MACRO_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_SPMU_SECTION 0x1000
+#define mmDMA_MACRO_BMON_CTI_BASE 0x7FFE465000ull
+#define DMA_MACRO_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_CTI_SECTION 0x1000
+#define mmDMA_MACRO_USER_CTI_BASE 0x7FFE466000ull
+#define DMA_MACRO_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_MACRO_USER_CTI_SECTION 0x1000
+#define mmDMA_MACRO_BMON_0_BASE 0x7FFE467000ull
+#define DMA_MACRO_BMON_0_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_0_SECTION 0x1000
+#define mmDMA_MACRO_BMON_1_BASE 0x7FFE468000ull
+#define DMA_MACRO_BMON_1_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_1_SECTION 0x1000
+#define mmDMA_MACRO_BMON_2_BASE 0x7FFE469000ull
+#define DMA_MACRO_BMON_2_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_2_SECTION 0x1000
+#define mmDMA_MACRO_BMON_3_BASE 0x7FFE46A000ull
+#define DMA_MACRO_BMON_3_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_3_SECTION 0x1000
+#define mmDMA_MACRO_BMON_4_BASE 0x7FFE46B000ull
+#define DMA_MACRO_BMON_4_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_4_SECTION 0x1000
+#define mmDMA_MACRO_BMON_5_BASE 0x7FFE46C000ull
+#define DMA_MACRO_BMON_5_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_5_SECTION 0x1000
+#define mmDMA_MACRO_BMON_6_BASE 0x7FFE46D000ull
+#define DMA_MACRO_BMON_6_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_6_SECTION 0x1000
+#define mmDMA_MACRO_BMON_7_BASE 0x7FFE46E000ull
+#define DMA_MACRO_BMON_7_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_7_SECTION 0x2000
+#define mmDMA_MACRO_FUNNEL_3_1_BASE 0x7FFE470000ull
+#define DMA_MACRO_FUNNEL_3_1_MAX_OFFSET 0x1000
+#define DMA_MACRO_FUNNEL_3_1_SECTION 0x10000
+#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull
+#define CPU_ROM_TABLE_MAX_OFFSET 0x1000
+#define CPU_ROM_TABLE_SECTION 0x1000
+#define mmCPU_ETF_0_BASE 0x7FFE481000ull
+#define CPU_ETF_0_MAX_OFFSET 0x1000
+#define CPU_ETF_0_SECTION 0x1000
+#define mmCPU_ETF_1_BASE 0x7FFE482000ull
+#define CPU_ETF_1_MAX_OFFSET 0x1000
+#define CPU_ETF_1_SECTION 0x2000
+#define mmCPU_CTI_BASE 0x7FFE484000ull
+#define CPU_CTI_MAX_OFFSET 0x1000
+#define CPU_CTI_SECTION 0x1000
+#define mmCPU_FUNNEL_BASE 0x7FFE485000ull
+#define CPU_FUNNEL_MAX_OFFSET 0x1000
+#define CPU_FUNNEL_SECTION 0x1000
+#define mmCPU_STM_BASE 0x7FFE486000ull
+#define CPU_STM_MAX_OFFSET 0x1000
+#define CPU_STM_SECTION 0x1000
+#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull
+#define CPU_CTI_TRACE_MAX_OFFSET 0x1000
+#define CPU_CTI_TRACE_SECTION 0x1000
+#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull
+#define CPU_ETF_TRACE_MAX_OFFSET 0x1000
+#define CPU_ETF_TRACE_SECTION 0x1000
+#define mmCPU_WR_BMON_BASE 0x7FFE489000ull
+#define CPU_WR_BMON_MAX_OFFSET 0x1000
+#define CPU_WR_BMON_SECTION 0x1000
+#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull
+#define CPU_RD_BMON_MAX_OFFSET 0x1000
+#define CPU_RD_BMON_SECTION 0x37000
+#define mmMMU_CS_STM_BASE 0x7FFE4C1000ull
+#define MMU_CS_STM_MAX_OFFSET 0x1000
+#define MMU_CS_STM_SECTION 0x1000
+#define mmMMU_CS_CTI_BASE 0x7FFE4C2000ull
+#define MMU_CS_CTI_MAX_OFFSET 0x1000
+#define MMU_CS_CTI_SECTION 0x1000
+#define mmMMU_CS_ETF_BASE 0x7FFE4C3000ull
+#define MMU_CS_ETF_MAX_OFFSET 0x1000
+#define MMU_CS_ETF_SECTION 0x1000
+#define mmMMU_CS_SPMU_BASE 0x7FFE4C4000ull
+#define MMU_CS_SPMU_MAX_OFFSET 0x1000
+#define MMU_CS_SPMU_SECTION 0x1000
+#define mmMMU_BMON_CTI_BASE 0x7FFE4C5000ull
+#define MMU_BMON_CTI_MAX_OFFSET 0x1000
+#define MMU_BMON_CTI_SECTION 0x1000
+#define mmMMU_USER_CTI_BASE 0x7FFE4C6000ull
+#define MMU_USER_CTI_MAX_OFFSET 0x1000
+#define MMU_USER_CTI_SECTION 0x1000
+#define mmMMU_BMON_0_BASE 0x7FFE4C7000ull
+#define MMU_BMON_0_MAX_OFFSET 0x1000
+#define MMU_BMON_0_SECTION 0x1000
+#define mmMMU_BMON_1_BASE 0x7FFE4C8000ull
+#define MMU_BMON_1_MAX_OFFSET 0x1000
+#define MMU_BMON_1_SECTION 0x338000
+#define mmCA53_BASE 0x7FFE800000ull
+#define CA53_MAX_OFFSET 0x1000
+#define CA53_SECTION 0x400000
+#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull
+#define PCI_ROM_TABLE_MAX_OFFSET 0x1000
+#define PCI_ROM_TABLE_SECTION 0x1000
+#define mmPCIE_STM_BASE 0x7FFEC01000ull
+#define PCIE_STM_MAX_OFFSET 0x1000
+#define PCIE_STM_SECTION 0x1000
+#define mmPCIE_ETF_BASE 0x7FFEC02000ull
+#define PCIE_ETF_MAX_OFFSET 0x1000
+#define PCIE_ETF_SECTION 0x1000
+#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull
+#define PCIE_CTI_0_MAX_OFFSET 0x1000
+#define PCIE_CTI_0_SECTION 0x1000
+#define mmPCIE_SPMU_BASE 0x7FFEC04000ull
+#define PCIE_SPMU_MAX_OFFSET 0x1000
+#define PCIE_SPMU_SECTION 0x1000
+#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull
+#define PCIE_CTI_1_MAX_OFFSET 0x1000
+#define PCIE_CTI_1_SECTION 0x1000
+#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull
+#define PCIE_FUNNEL_MAX_OFFSET 0x1000
+#define PCIE_FUNNEL_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull
+#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_WR_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull
+#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_RD_SECTION 0x1000
+#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull
+#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_WR_SECTION 0x1000
+#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull
+#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_RD_SECTION 0x36000
+#define mmPSOC_CTI_BASE 0x7FFEC40000ull
+#define PSOC_CTI_MAX_OFFSET 0x1000
+#define PSOC_CTI_SECTION 0x1000
+#define mmPSOC_STM_BASE 0x7FFEC41000ull
+#define PSOC_STM_MAX_OFFSET 0x1000
+#define PSOC_STM_SECTION 0x1000
+#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull
+#define PSOC_FUNNEL_MAX_OFFSET 0x1000
+#define PSOC_FUNNEL_SECTION 0x1000
+#define mmPSOC_ETR_BASE 0x7FFEC43000ull
+#define PSOC_ETR_MAX_OFFSET 0x1000
+#define PSOC_ETR_SECTION 0x1000
+#define mmPSOC_ETF_BASE 0x7FFEC44000ull
+#define PSOC_ETF_MAX_OFFSET 0x1000
+#define PSOC_ETF_SECTION 0x1000
+#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull
+#define PSOC_TS_CTI_MAX_OFFSET 0x1000
+#define PSOC_TS_CTI_SECTION 0xB000
+#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull
+#define TOP_ROM_TABLE_MAX_OFFSET 0x1000
+#define TOP_ROM_TABLE_SECTION 0x1F0000
+#define mmTPC1_RTR_FUNNEL_BASE 0x7FFEE40000ull
+#define TPC1_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC1_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC2_RTR_FUNNEL_BASE 0x7FFEE80000ull
+#define TPC2_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC2_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC3_RTR_FUNNEL_BASE 0x7FFEEC0000ull
+#define TPC3_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC3_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC4_RTR_FUNNEL_BASE 0x7FFEF00000ull
+#define TPC4_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC4_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC5_RTR_FUNNEL_BASE 0x7FFEF40000ull
+#define TPC5_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC5_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC6_RTR_FUNNEL_BASE 0x7FFEF80000ull
+#define TPC6_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC6_RTR_FUNNEL_SECTION 0x81000
+#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull
+#define TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC0_EML_SPMU_SECTION 0x1000
+#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull
+#define TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define TPC0_EML_ETF_SECTION 0x1000
+#define mmTPC0_EML_STM_BASE 0x7FFF003000ull
+#define TPC0_EML_STM_MAX_OFFSET 0x1000
+#define TPC0_EML_STM_SECTION 0x1000
+#define mmTPC0_EML_ETM_R4_BASE 0x7FFF004000ull
+#define TPC0_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC0_EML_ETM_R4_SECTION 0x1000
+#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull
+#define TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define TPC0_EML_CTI_SECTION 0x1000
+#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull
+#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull
+#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull
+#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull
+#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull
+#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull
+#define TPC0_EML_CFG_MAX_OFFSET 0x338
+#define TPC0_EML_CFG_SECTION 0x1BF000
+#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull
+#define TPC0_EML_CS_MAX_OFFSET 0x1000
+#define TPC0_EML_CS_SECTION 0x2000
+#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull
+#define TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC1_EML_SPMU_SECTION 0x1000
+#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull
+#define TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define TPC1_EML_ETF_SECTION 0x1000
+#define mmTPC1_EML_STM_BASE 0x7FFF203000ull
+#define TPC1_EML_STM_MAX_OFFSET 0x1000
+#define TPC1_EML_STM_SECTION 0x1000
+#define mmTPC1_EML_ETM_R4_BASE 0x7FFF204000ull
+#define TPC1_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC1_EML_ETM_R4_SECTION 0x1000
+#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull
+#define TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define TPC1_EML_CTI_SECTION 0x1000
+#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull
+#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull
+#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull
+#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull
+#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull
+#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull
+#define TPC1_EML_CFG_MAX_OFFSET 0x338
+#define TPC1_EML_CFG_SECTION 0x1BF000
+#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull
+#define TPC1_EML_CS_MAX_OFFSET 0x1000
+#define TPC1_EML_CS_SECTION 0x2000
+#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull
+#define TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC2_EML_SPMU_SECTION 0x1000
+#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull
+#define TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define TPC2_EML_ETF_SECTION 0x1000
+#define mmTPC2_EML_STM_BASE 0x7FFF403000ull
+#define TPC2_EML_STM_MAX_OFFSET 0x1000
+#define TPC2_EML_STM_SECTION 0x1000
+#define mmTPC2_EML_ETM_R4_BASE 0x7FFF404000ull
+#define TPC2_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC2_EML_ETM_R4_SECTION 0x1000
+#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull
+#define TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define TPC2_EML_CTI_SECTION 0x1000
+#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull
+#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull
+#define TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull
+#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull
+#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull
+#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull
+#define TPC2_EML_CFG_MAX_OFFSET 0x338
+#define TPC2_EML_CFG_SECTION 0x1BF000
+#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull
+#define TPC2_EML_CS_MAX_OFFSET 0x1000
+#define TPC2_EML_CS_SECTION 0x2000
+#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull
+#define TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC3_EML_SPMU_SECTION 0x1000
+#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull
+#define TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define TPC3_EML_ETF_SECTION 0x1000
+#define mmTPC3_EML_STM_BASE 0x7FFF603000ull
+#define TPC3_EML_STM_MAX_OFFSET 0x1000
+#define TPC3_EML_STM_SECTION 0x1000
+#define mmTPC3_EML_ETM_R4_BASE 0x7FFF604000ull
+#define TPC3_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC3_EML_ETM_R4_SECTION 0x1000
+#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull
+#define TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define TPC3_EML_CTI_SECTION 0x1000
+#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull
+#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull
+#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull
+#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull
+#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull
+#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull
+#define TPC3_EML_CFG_MAX_OFFSET 0x338
+#define TPC3_EML_CFG_SECTION 0x1BF000
+#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull
+#define TPC3_EML_CS_MAX_OFFSET 0x1000
+#define TPC3_EML_CS_SECTION 0x2000
+#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull
+#define TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC4_EML_SPMU_SECTION 0x1000
+#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull
+#define TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define TPC4_EML_ETF_SECTION 0x1000
+#define mmTPC4_EML_STM_BASE 0x7FFF803000ull
+#define TPC4_EML_STM_MAX_OFFSET 0x1000
+#define TPC4_EML_STM_SECTION 0x1000
+#define mmTPC4_EML_ETM_R4_BASE 0x7FFF804000ull
+#define TPC4_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC4_EML_ETM_R4_SECTION 0x1000
+#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull
+#define TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define TPC4_EML_CTI_SECTION 0x1000
+#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull
+#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull
+#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull
+#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull
+#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull
+#define TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull
+#define TPC4_EML_CFG_MAX_OFFSET 0x338
+#define TPC4_EML_CFG_SECTION 0x1BF000
+#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull
+#define TPC4_EML_CS_MAX_OFFSET 0x1000
+#define TPC4_EML_CS_SECTION 0x2000
+#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull
+#define TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC5_EML_SPMU_SECTION 0x1000
+#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull
+#define TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define TPC5_EML_ETF_SECTION 0x1000
+#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull
+#define TPC5_EML_STM_MAX_OFFSET 0x1000
+#define TPC5_EML_STM_SECTION 0x1000
+#define mmTPC5_EML_ETM_R4_BASE 0x7FFFA04000ull
+#define TPC5_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC5_EML_ETM_R4_SECTION 0x1000
+#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull
+#define TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define TPC5_EML_CTI_SECTION 0x1000
+#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull
+#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull
+#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull
+#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull
+#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull
+#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull
+#define TPC5_EML_CFG_MAX_OFFSET 0x338
+#define TPC5_EML_CFG_SECTION 0x1BF000
+#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull
+#define TPC5_EML_CS_MAX_OFFSET 0x1000
+#define TPC5_EML_CS_SECTION 0x2000
+#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull
+#define TPC6_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC6_EML_SPMU_SECTION 0x1000
+#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull
+#define TPC6_EML_ETF_MAX_OFFSET 0x1000
+#define TPC6_EML_ETF_SECTION 0x1000
+#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull
+#define TPC6_EML_STM_MAX_OFFSET 0x1000
+#define TPC6_EML_STM_SECTION 0x1000
+#define mmTPC6_EML_ETM_R4_BASE 0x7FFFC04000ull
+#define TPC6_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC6_EML_ETM_R4_SECTION 0x1000
+#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull
+#define TPC6_EML_CTI_MAX_OFFSET 0x1000
+#define TPC6_EML_CTI_SECTION 0x1000
+#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull
+#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC6_EML_FUNNEL_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull
+#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull
+#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull
+#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull
+#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull
+#define TPC6_EML_CFG_MAX_OFFSET 0x338
+#define TPC6_EML_CFG_SECTION 0x1BF000
+#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull
+#define TPC6_EML_CS_MAX_OFFSET 0x1000
+#define TPC6_EML_CS_SECTION 0x2000
+#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull
+#define TPC7_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC7_EML_SPMU_SECTION 0x1000
+#define mmTPC7_EML_ETF_BASE 0x7FFFE02000ull
+#define TPC7_EML_ETF_MAX_OFFSET 0x1000
+#define TPC7_EML_ETF_SECTION 0x1000
+#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull
+#define TPC7_EML_STM_MAX_OFFSET 0x1000
+#define TPC7_EML_STM_SECTION 0x1000
+#define mmTPC7_EML_ETM_R4_BASE 0x7FFFE04000ull
+#define TPC7_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC7_EML_ETM_R4_SECTION 0x1000
+#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull
+#define TPC7_EML_CTI_MAX_OFFSET 0x1000
+#define TPC7_EML_CTI_SECTION 0x1000
+#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull
+#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC7_EML_FUNNEL_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull
+#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull
+#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull
+#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull
+#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull
+#define TPC7_EML_CFG_MAX_OFFSET 0x338
+#define TPC7_EML_CFG_SECTION 0x1BF000
+#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull
+#define TPC7_EML_CS_MAX_OFFSET 0x1000
+
+#endif /* GOYA_BLOCKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
new file mode 100644
index 000000000000..a161ecfe74de
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GOYA_MASKS_H_
+#define ASIC_REG_GOYA_MASKS_H_
+
+#include "goya_regs.h"
+
+/* Useful masks for bits in various registers */
+#define QMAN_DMA_ENABLE (\
+ (1 << DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT))
+
+#define QMAN_DMA_FULLY_TRUSTED (\
+ (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define QMAN_DMA_PARTLY_TRUSTED (\
+ (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define QMAN_DMA_STOP (\
+ (1 << DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT))
+
+#define QMAN_DMA_IS_STOPPED (\
+ (1 << DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT))
+
+#define QMAN_DMA_ERR_MSG_EN (\
+ (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define QMAN_MME_ENABLE (\
+ (1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define CMDQ_MME_ENABLE (\
+ (1 << MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_MME_STOP (\
+ (1 << MME_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define CMDQ_MME_STOP (\
+ (1 << MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define QMAN_MME_ERR_MSG_EN (\
+ (1 << MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define CMDQ_MME_ERR_MSG_EN (\
+ (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define QMAN_MME_ERR_PROT (\
+ (1 << MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define CMDQ_MME_ERR_PROT (\
+ (1 << MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define QMAN_TPC_ENABLE (\
+ (1 << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define CMDQ_TPC_ENABLE (\
+ (1 << TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_TPC_STOP (\
+ (1 << TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define CMDQ_TPC_STOP (\
+ (1 << TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define QMAN_TPC_ERR_MSG_EN (\
+ (1 << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define CMDQ_TPC_ERR_MSG_EN (\
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define QMAN_TPC_ERR_PROT (\
+ (1 << TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define CMDQ_TPC_ERR_PROT (\
+ (1 << TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+/* RESETS */
+#define DMA_MME_TPC_RESET (\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT)
+
+#define RESET_ALL (\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT |\
+ PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT)
+
+#define CA53_RESET (\
+ (~\
+ (1 << PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT)\
+ ) & 0x7FFFFF)
+
+#define CPU_RESET_ASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+#define CPU_RESET_CORE0_DEASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+/* PCI CONFIGURATION SPACE */
+#define mmPCI_CONFIG_ELBI_ADDR 0xFF0
+#define mmPCI_CONFIG_ELBI_DATA 0xFF4
+#define mmPCI_CONFIG_ELBI_CTRL 0xFF8
+#define PCI_CONFIG_ELBI_CTRL_WRITE (1 << 31)
+
+#define mmPCI_CONFIG_ELBI_STS 0xFFC
+#define PCI_CONFIG_ELBI_STS_ERR (1 << 30)
+#define PCI_CONFIG_ELBI_STS_DONE (1 << 31)
+#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
+ PCI_CONFIG_ELBI_STS_DONE)
+
+#define GOYA_IRQ_HBW_ID_MASK 0x1FFF
+#define GOYA_IRQ_HBW_ID_SHIFT 0
+#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000
+#define GOYA_IRQ_HBW_INTERNAL_ID_SHIFT 13
+#define GOYA_IRQ_HBW_AGENT_ID_MASK 0x1F0000
+#define GOYA_IRQ_HBW_AGENT_ID_SHIFT 16
+#define GOYA_IRQ_HBW_Y_MASK 0xE00000
+#define GOYA_IRQ_HBW_Y_SHIFT 21
+#define GOYA_IRQ_HBW_X_MASK 0x7000000
+#define GOYA_IRQ_HBW_X_SHIFT 24
+#define GOYA_IRQ_LBW_ID_MASK 0xFF
+#define GOYA_IRQ_LBW_ID_SHIFT 0
+#define GOYA_IRQ_LBW_INTERNAL_ID_MASK 0x700
+#define GOYA_IRQ_LBW_INTERNAL_ID_SHIFT 8
+#define GOYA_IRQ_LBW_AGENT_ID_MASK 0xF800
+#define GOYA_IRQ_LBW_AGENT_ID_SHIFT 11
+#define GOYA_IRQ_LBW_Y_MASK 0x70000
+#define GOYA_IRQ_LBW_Y_SHIFT 16
+#define GOYA_IRQ_LBW_X_MASK 0x380000
+#define GOYA_IRQ_LBW_X_SHIFT 19
+
+#define DMA_QM_IDLE_MASK (DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK | \
+ DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK | \
+ DMA_QM_0_GLBL_STS0_CP_IDLE_MASK | \
+ DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK)
+
+#define TPC_QM_IDLE_MASK (TPC0_QM_GLBL_STS0_PQF_IDLE_MASK | \
+ TPC0_QM_GLBL_STS0_CQF_IDLE_MASK | \
+ TPC0_QM_GLBL_STS0_CP_IDLE_MASK)
+
+#define TPC_CMDQ_IDLE_MASK (TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \
+ TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK)
+
+#define TPC_CFG_IDLE_MASK (TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \
+ TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \
+ TPC0_CFG_STATUS_IQ_EMPTY_MASK | \
+ TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK)
+
+#define MME_QM_IDLE_MASK (MME_QM_GLBL_STS0_PQF_IDLE_MASK | \
+ MME_QM_GLBL_STS0_CQF_IDLE_MASK | \
+ MME_QM_GLBL_STS0_CP_IDLE_MASK)
+
+#define MME_CMDQ_IDLE_MASK (MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \
+ MME_CMDQ_GLBL_STS0_CP_IDLE_MASK)
+
+#define MME_ARCH_IDLE_MASK (MME_ARCH_STATUS_SB_A_EMPTY_MASK | \
+ MME_ARCH_STATUS_SB_B_EMPTY_MASK | \
+ MME_ARCH_STATUS_SB_CIN_EMPTY_MASK | \
+ MME_ARCH_STATUS_SB_COUT_EMPTY_MASK)
+
+#define MME_SHADOW_IDLE_MASK (MME_SHADOW_0_STATUS_A_MASK | \
+ MME_SHADOW_0_STATUS_B_MASK | \
+ MME_SHADOW_0_STATUS_CIN_MASK | \
+ MME_SHADOW_0_STATUS_COUT_MASK | \
+ MME_SHADOW_0_STATUS_TE_MASK | \
+ MME_SHADOW_0_STATUS_LD_MASK | \
+ MME_SHADOW_0_STATUS_ST_MASK)
+
+#define TPC1_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC2_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC3_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC4_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC5_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC6_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC7_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+
+#define DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+
+#endif /* ASIC_REG_GOYA_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
new file mode 100644
index 000000000000..6cb0b6e54d41
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GOYA_REGS_H_
+#define ASIC_REG_GOYA_REGS_H_
+
+#include "goya_blocks.h"
+#include "stlb_regs.h"
+#include "mmu_regs.h"
+#include "pcie_aux_regs.h"
+#include "psoc_global_conf_regs.h"
+#include "psoc_spi_regs.h"
+#include "psoc_mme_pll_regs.h"
+#include "psoc_pci_pll_regs.h"
+#include "psoc_emmc_pll_regs.h"
+#include "cpu_if_regs.h"
+#include "cpu_ca53_cfg_regs.h"
+#include "cpu_pll_regs.h"
+#include "ic_pll_regs.h"
+#include "mc_pll_regs.h"
+#include "tpc_pll_regs.h"
+#include "dma_qm_0_regs.h"
+#include "dma_qm_1_regs.h"
+#include "dma_qm_2_regs.h"
+#include "dma_qm_3_regs.h"
+#include "dma_qm_4_regs.h"
+#include "dma_ch_0_regs.h"
+#include "dma_ch_1_regs.h"
+#include "dma_ch_2_regs.h"
+#include "dma_ch_3_regs.h"
+#include "dma_ch_4_regs.h"
+#include "dma_macro_regs.h"
+#include "dma_nrtr_regs.h"
+#include "pci_nrtr_regs.h"
+#include "sram_y0_x0_rtr_regs.h"
+#include "sram_y0_x1_rtr_regs.h"
+#include "sram_y0_x2_rtr_regs.h"
+#include "sram_y0_x3_rtr_regs.h"
+#include "sram_y0_x4_rtr_regs.h"
+#include "mme_regs.h"
+#include "mme_qm_regs.h"
+#include "mme_cmdq_regs.h"
+#include "mme1_rtr_regs.h"
+#include "mme2_rtr_regs.h"
+#include "mme3_rtr_regs.h"
+#include "mme4_rtr_regs.h"
+#include "mme5_rtr_regs.h"
+#include "mme6_rtr_regs.h"
+#include "tpc0_cfg_regs.h"
+#include "tpc1_cfg_regs.h"
+#include "tpc2_cfg_regs.h"
+#include "tpc3_cfg_regs.h"
+#include "tpc4_cfg_regs.h"
+#include "tpc5_cfg_regs.h"
+#include "tpc6_cfg_regs.h"
+#include "tpc7_cfg_regs.h"
+#include "tpc0_qm_regs.h"
+#include "tpc1_qm_regs.h"
+#include "tpc2_qm_regs.h"
+#include "tpc3_qm_regs.h"
+#include "tpc4_qm_regs.h"
+#include "tpc5_qm_regs.h"
+#include "tpc6_qm_regs.h"
+#include "tpc7_qm_regs.h"
+#include "tpc0_cmdq_regs.h"
+#include "tpc1_cmdq_regs.h"
+#include "tpc2_cmdq_regs.h"
+#include "tpc3_cmdq_regs.h"
+#include "tpc4_cmdq_regs.h"
+#include "tpc5_cmdq_regs.h"
+#include "tpc6_cmdq_regs.h"
+#include "tpc7_cmdq_regs.h"
+#include "tpc0_nrtr_regs.h"
+#include "tpc1_rtr_regs.h"
+#include "tpc2_rtr_regs.h"
+#include "tpc3_rtr_regs.h"
+#include "tpc4_rtr_regs.h"
+#include "tpc5_rtr_regs.h"
+#include "tpc6_rtr_regs.h"
+#include "tpc7_nrtr_regs.h"
+#include "tpc0_eml_cfg_regs.h"
+
+#include "psoc_global_conf_masks.h"
+#include "dma_macro_masks.h"
+#include "dma_qm_0_masks.h"
+#include "tpc0_qm_masks.h"
+#include "tpc0_cmdq_masks.h"
+#include "mme_qm_masks.h"
+#include "mme_cmdq_masks.h"
+#include "tpc0_cfg_masks.h"
+#include "tpc0_eml_cfg_masks.h"
+#include "mme1_rtr_masks.h"
+#include "tpc0_nrtr_masks.h"
+#include "dma_nrtr_masks.h"
+#include "pci_nrtr_masks.h"
+#include "stlb_masks.h"
+#include "cpu_ca53_cfg_masks.h"
+#include "mmu_masks.h"
+#include "mme_masks.h"
+
+#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+#define mmPCIE_DBI_MSIX_DOORBELL_OFF 0xC02948
+
+#define mmSYNC_MNGR_MON_PAY_ADDRL_0 0x113000
+#define mmSYNC_MNGR_SOB_OBJ_0 0x112000
+#define mmSYNC_MNGR_SOB_OBJ_1000 0x112FA0
+#define mmSYNC_MNGR_SOB_OBJ_1007 0x112FBC
+#define mmSYNC_MNGR_SOB_OBJ_1023 0x112FFC
+#define mmSYNC_MNGR_MON_STATUS_0 0x114000
+#define mmSYNC_MNGR_MON_STATUS_255 0x1143FC
+
+#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040
+
+#endif /* ASIC_REG_GOYA_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
new file mode 100644
index 000000000000..0a743817aad7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_IC_PLL_REGS_H_
+#define ASIC_REG_IC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * IC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmIC_PLL_NR 0x4A3100
+
+#define mmIC_PLL_NF 0x4A3104
+
+#define mmIC_PLL_OD 0x4A3108
+
+#define mmIC_PLL_NB 0x4A310C
+
+#define mmIC_PLL_CFG 0x4A3110
+
+#define mmIC_PLL_LOSE_MASK 0x4A3120
+
+#define mmIC_PLL_LOCK_INTR 0x4A3128
+
+#define mmIC_PLL_LOCK_BYPASS 0x4A312C
+
+#define mmIC_PLL_DATA_CHNG 0x4A3130
+
+#define mmIC_PLL_RST 0x4A3134
+
+#define mmIC_PLL_SLIP_WD_CNTR 0x4A3150
+
+#define mmIC_PLL_DIV_FACTOR_0 0x4A3200
+
+#define mmIC_PLL_DIV_FACTOR_1 0x4A3204
+
+#define mmIC_PLL_DIV_FACTOR_2 0x4A3208
+
+#define mmIC_PLL_DIV_FACTOR_3 0x4A320C
+
+#define mmIC_PLL_DIV_FACTOR_CMD_0 0x4A3220
+
+#define mmIC_PLL_DIV_FACTOR_CMD_1 0x4A3224
+
+#define mmIC_PLL_DIV_FACTOR_CMD_2 0x4A3228
+
+#define mmIC_PLL_DIV_FACTOR_CMD_3 0x4A322C
+
+#define mmIC_PLL_DIV_SEL_0 0x4A3280
+
+#define mmIC_PLL_DIV_SEL_1 0x4A3284
+
+#define mmIC_PLL_DIV_SEL_2 0x4A3288
+
+#define mmIC_PLL_DIV_SEL_3 0x4A328C
+
+#define mmIC_PLL_DIV_EN_0 0x4A32A0
+
+#define mmIC_PLL_DIV_EN_1 0x4A32A4
+
+#define mmIC_PLL_DIV_EN_2 0x4A32A8
+
+#define mmIC_PLL_DIV_EN_3 0x4A32AC
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_0 0x4A32C0
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_1 0x4A32C4
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_2 0x4A32C8
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_3 0x4A32CC
+
+#define mmIC_PLL_CLK_GATER 0x4A3300
+
+#define mmIC_PLL_CLK_RLX_0 0x4A3310
+
+#define mmIC_PLL_CLK_RLX_1 0x4A3314
+
+#define mmIC_PLL_CLK_RLX_2 0x4A3318
+
+#define mmIC_PLL_CLK_RLX_3 0x4A331C
+
+#define mmIC_PLL_REF_CNTR_PERIOD 0x4A3400
+
+#define mmIC_PLL_REF_LOW_THRESHOLD 0x4A3410
+
+#define mmIC_PLL_REF_HIGH_THRESHOLD 0x4A3420
+
+#define mmIC_PLL_PLL_NOT_STABLE 0x4A3430
+
+#define mmIC_PLL_FREQ_CALC_EN 0x4A3440
+
+#endif /* ASIC_REG_IC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
new file mode 100644
index 000000000000..4408188aa067
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MC_PLL_REGS_H_
+#define ASIC_REG_MC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * MC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmMC_PLL_NR 0x4A1100
+
+#define mmMC_PLL_NF 0x4A1104
+
+#define mmMC_PLL_OD 0x4A1108
+
+#define mmMC_PLL_NB 0x4A110C
+
+#define mmMC_PLL_CFG 0x4A1110
+
+#define mmMC_PLL_LOSE_MASK 0x4A1120
+
+#define mmMC_PLL_LOCK_INTR 0x4A1128
+
+#define mmMC_PLL_LOCK_BYPASS 0x4A112C
+
+#define mmMC_PLL_DATA_CHNG 0x4A1130
+
+#define mmMC_PLL_RST 0x4A1134
+
+#define mmMC_PLL_SLIP_WD_CNTR 0x4A1150
+
+#define mmMC_PLL_DIV_FACTOR_0 0x4A1200
+
+#define mmMC_PLL_DIV_FACTOR_1 0x4A1204
+
+#define mmMC_PLL_DIV_FACTOR_2 0x4A1208
+
+#define mmMC_PLL_DIV_FACTOR_3 0x4A120C
+
+#define mmMC_PLL_DIV_FACTOR_CMD_0 0x4A1220
+
+#define mmMC_PLL_DIV_FACTOR_CMD_1 0x4A1224
+
+#define mmMC_PLL_DIV_FACTOR_CMD_2 0x4A1228
+
+#define mmMC_PLL_DIV_FACTOR_CMD_3 0x4A122C
+
+#define mmMC_PLL_DIV_SEL_0 0x4A1280
+
+#define mmMC_PLL_DIV_SEL_1 0x4A1284
+
+#define mmMC_PLL_DIV_SEL_2 0x4A1288
+
+#define mmMC_PLL_DIV_SEL_3 0x4A128C
+
+#define mmMC_PLL_DIV_EN_0 0x4A12A0
+
+#define mmMC_PLL_DIV_EN_1 0x4A12A4
+
+#define mmMC_PLL_DIV_EN_2 0x4A12A8
+
+#define mmMC_PLL_DIV_EN_3 0x4A12AC
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_0 0x4A12C0
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_1 0x4A12C4
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_2 0x4A12C8
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_3 0x4A12CC
+
+#define mmMC_PLL_CLK_GATER 0x4A1300
+
+#define mmMC_PLL_CLK_RLX_0 0x4A1310
+
+#define mmMC_PLL_CLK_RLX_1 0x4A1314
+
+#define mmMC_PLL_CLK_RLX_2 0x4A1318
+
+#define mmMC_PLL_CLK_RLX_3 0x4A131C
+
+#define mmMC_PLL_REF_CNTR_PERIOD 0x4A1400
+
+#define mmMC_PLL_REF_LOW_THRESHOLD 0x4A1410
+
+#define mmMC_PLL_REF_HIGH_THRESHOLD 0x4A1420
+
+#define mmMC_PLL_PLL_NOT_STABLE 0x4A1430
+
+#define mmMC_PLL_FREQ_CALC_EN 0x4A1440
+
+#endif /* ASIC_REG_MC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
new file mode 100644
index 000000000000..687bca5c5fe3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
@@ -0,0 +1,653 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME1_RTR_MASKS_H_
+#define ASIC_REG_MME1_RTR_MASKS_H_
+
+/*
+ *****************************************
+ * MME1_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+/* MME1_RTR_HBW_RD_RQ_E_ARB */
+#define MME1_RTR_HBW_RD_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_W_ARB */
+#define MME1_RTR_HBW_RD_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_N_ARB */
+#define MME1_RTR_HBW_RD_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_S_ARB */
+#define MME1_RTR_HBW_RD_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_L_ARB */
+#define MME1_RTR_HBW_RD_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_HBW_E_ARB_MAX */
+#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_W_ARB_MAX */
+#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_N_ARB_MAX */
+#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_S_ARB_MAX */
+#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_L_ARB_MAX */
+#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_RD_RS_MAX_CREDIT */
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_MASK 0x3F
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_MASK 0x3F00
+
+/* MME1_RTR_HBW_WR_RQ_MAX_CREDIT */
+#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_MASK 0x3F
+
+/* MME1_RTR_HBW_RD_RQ_MAX_CREDIT */
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_MASK 0x3F
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_MASK 0x3F00
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_MASK 0x3F0000
+
+/* MME1_RTR_HBW_RD_RS_E_ARB */
+#define MME1_RTR_HBW_RD_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_W_ARB */
+#define MME1_RTR_HBW_RD_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_N_ARB */
+#define MME1_RTR_HBW_RD_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_S_ARB */
+#define MME1_RTR_HBW_RD_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_L_ARB */
+#define MME1_RTR_HBW_RD_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_E_ARB */
+#define MME1_RTR_HBW_WR_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_W_ARB */
+#define MME1_RTR_HBW_WR_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_N_ARB */
+#define MME1_RTR_HBW_WR_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_S_ARB */
+#define MME1_RTR_HBW_WR_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_L_ARB */
+#define MME1_RTR_HBW_WR_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_E_ARB */
+#define MME1_RTR_HBW_WR_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_W_ARB */
+#define MME1_RTR_HBW_WR_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_N_ARB */
+#define MME1_RTR_HBW_WR_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_S_ARB */
+#define MME1_RTR_HBW_WR_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_L_ARB */
+#define MME1_RTR_HBW_WR_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_E_ARB */
+#define MME1_RTR_LBW_RD_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_W_ARB */
+#define MME1_RTR_LBW_RD_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_N_ARB */
+#define MME1_RTR_LBW_RD_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_S_ARB */
+#define MME1_RTR_LBW_RD_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_L_ARB */
+#define MME1_RTR_LBW_RD_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_E_ARB_MAX */
+#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_W_ARB_MAX */
+#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_N_ARB_MAX */
+#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_S_ARB_MAX */
+#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_L_ARB_MAX */
+#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_SRAM_MAX_CREDIT */
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_SHIFT 0
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_MASK 0x3F
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_SHIFT 8
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_MASK 0x3F00
+
+/* MME1_RTR_LBW_RD_RS_E_ARB */
+#define MME1_RTR_LBW_RD_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_W_ARB */
+#define MME1_RTR_LBW_RD_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_N_ARB */
+#define MME1_RTR_LBW_RD_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_S_ARB */
+#define MME1_RTR_LBW_RD_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_L_ARB */
+#define MME1_RTR_LBW_RD_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_E_ARB */
+#define MME1_RTR_LBW_WR_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_W_ARB */
+#define MME1_RTR_LBW_WR_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_N_ARB */
+#define MME1_RTR_LBW_WR_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_S_ARB */
+#define MME1_RTR_LBW_WR_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_L_ARB */
+#define MME1_RTR_LBW_WR_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_E_ARB */
+#define MME1_RTR_LBW_WR_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_W_ARB */
+#define MME1_RTR_LBW_WR_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_N_ARB */
+#define MME1_RTR_LBW_WR_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_S_ARB */
+#define MME1_RTR_LBW_WR_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_L_ARB */
+#define MME1_RTR_LBW_WR_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_DBG_E_ARB */
+#define MME1_RTR_DBG_E_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_E_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_E_ARB_S_SHIFT 8
+#define MME1_RTR_DBG_E_ARB_S_MASK 0x700
+#define MME1_RTR_DBG_E_ARB_N_SHIFT 16
+#define MME1_RTR_DBG_E_ARB_N_MASK 0x70000
+#define MME1_RTR_DBG_E_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_W_ARB */
+#define MME1_RTR_DBG_W_ARB_E_SHIFT 0
+#define MME1_RTR_DBG_W_ARB_E_MASK 0x7
+#define MME1_RTR_DBG_W_ARB_S_SHIFT 8
+#define MME1_RTR_DBG_W_ARB_S_MASK 0x700
+#define MME1_RTR_DBG_W_ARB_N_SHIFT 16
+#define MME1_RTR_DBG_W_ARB_N_MASK 0x70000
+#define MME1_RTR_DBG_W_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_N_ARB */
+#define MME1_RTR_DBG_N_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_N_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_N_ARB_E_SHIFT 8
+#define MME1_RTR_DBG_N_ARB_E_MASK 0x700
+#define MME1_RTR_DBG_N_ARB_S_SHIFT 16
+#define MME1_RTR_DBG_N_ARB_S_MASK 0x70000
+#define MME1_RTR_DBG_N_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_S_ARB */
+#define MME1_RTR_DBG_S_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_S_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_S_ARB_E_SHIFT 8
+#define MME1_RTR_DBG_S_ARB_E_MASK 0x700
+#define MME1_RTR_DBG_S_ARB_N_SHIFT 16
+#define MME1_RTR_DBG_S_ARB_N_MASK 0x70000
+#define MME1_RTR_DBG_S_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_L_ARB */
+#define MME1_RTR_DBG_L_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_L_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_L_ARB_E_SHIFT 8
+#define MME1_RTR_DBG_L_ARB_E_MASK 0x700
+#define MME1_RTR_DBG_L_ARB_S_SHIFT 16
+#define MME1_RTR_DBG_L_ARB_S_MASK 0x70000
+#define MME1_RTR_DBG_L_ARB_N_SHIFT 24
+#define MME1_RTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_DBG_E_ARB_MAX */
+#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_W_ARB_MAX */
+#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_N_ARB_MAX */
+#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_S_ARB_MAX */
+#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_L_ARB_MAX */
+#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_SPLIT_COEF */
+#define MME1_RTR_SPLIT_COEF_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_CFG */
+#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 4
+#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x10
+#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 5
+#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x20
+#define MME1_RTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define MME1_RTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* MME1_RTR_SPLIT_RD_SAT */
+#define MME1_RTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_RD_RST_TOKEN */
+#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_RD_TIMEOUT */
+#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_SPLIT_WR_SAT */
+#define MME1_RTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* MME1_RTR_WPLIT_WR_TST_TOLEN */
+#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_WR_TIMEOUT */
+#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_HBW_RANGE_HIT */
+#define MME1_RTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define MME1_RTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* MME1_RTR_HBW_RANGE_MASK_L */
+#define MME1_RTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_HBW_RANGE_MASK_H */
+#define MME1_RTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* MME1_RTR_HBW_RANGE_BASE_L */
+#define MME1_RTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_HBW_RANGE_BASE_H */
+#define MME1_RTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* MME1_RTR_LBW_RANGE_HIT */
+#define MME1_RTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define MME1_RTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* MME1_RTR_LBW_RANGE_MASK */
+#define MME1_RTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define MME1_RTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* MME1_RTR_LBW_RANGE_BASE */
+#define MME1_RTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define MME1_RTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* MME1_RTR_RGLTR */
+#define MME1_RTR_RGLTR_WR_EN_SHIFT 0
+#define MME1_RTR_RGLTR_WR_EN_MASK 0x1
+#define MME1_RTR_RGLTR_RD_EN_SHIFT 4
+#define MME1_RTR_RGLTR_RD_EN_MASK 0x10
+
+/* MME1_RTR_RGLTR_WR_RESULT */
+#define MME1_RTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define MME1_RTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* MME1_RTR_RGLTR_RD_RESULT */
+#define MME1_RTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define MME1_RTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* MME1_RTR_SCRAMB_EN */
+#define MME1_RTR_SCRAMB_EN_VAL_SHIFT 0
+#define MME1_RTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* MME1_RTR_NON_LIN_SCRAMB */
+#define MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
new file mode 100644
index 000000000000..c248339a1cbe
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME1_RTR_REGS_H_
+#define ASIC_REG_MME1_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME1_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME1_RTR_HBW_RD_RQ_E_ARB 0x40100
+
+#define mmMME1_RTR_HBW_RD_RQ_W_ARB 0x40104
+
+#define mmMME1_RTR_HBW_RD_RQ_N_ARB 0x40108
+
+#define mmMME1_RTR_HBW_RD_RQ_S_ARB 0x4010C
+
+#define mmMME1_RTR_HBW_RD_RQ_L_ARB 0x40110
+
+#define mmMME1_RTR_HBW_E_ARB_MAX 0x40120
+
+#define mmMME1_RTR_HBW_W_ARB_MAX 0x40124
+
+#define mmMME1_RTR_HBW_N_ARB_MAX 0x40128
+
+#define mmMME1_RTR_HBW_S_ARB_MAX 0x4012C
+
+#define mmMME1_RTR_HBW_L_ARB_MAX 0x40130
+
+#define mmMME1_RTR_HBW_RD_RS_MAX_CREDIT 0x40140
+
+#define mmMME1_RTR_HBW_WR_RQ_MAX_CREDIT 0x40144
+
+#define mmMME1_RTR_HBW_RD_RQ_MAX_CREDIT 0x40148
+
+#define mmMME1_RTR_HBW_RD_RS_E_ARB 0x40150
+
+#define mmMME1_RTR_HBW_RD_RS_W_ARB 0x40154
+
+#define mmMME1_RTR_HBW_RD_RS_N_ARB 0x40158
+
+#define mmMME1_RTR_HBW_RD_RS_S_ARB 0x4015C
+
+#define mmMME1_RTR_HBW_RD_RS_L_ARB 0x40160
+
+#define mmMME1_RTR_HBW_WR_RQ_E_ARB 0x40170
+
+#define mmMME1_RTR_HBW_WR_RQ_W_ARB 0x40174
+
+#define mmMME1_RTR_HBW_WR_RQ_N_ARB 0x40178
+
+#define mmMME1_RTR_HBW_WR_RQ_S_ARB 0x4017C
+
+#define mmMME1_RTR_HBW_WR_RQ_L_ARB 0x40180
+
+#define mmMME1_RTR_HBW_WR_RS_E_ARB 0x40190
+
+#define mmMME1_RTR_HBW_WR_RS_W_ARB 0x40194
+
+#define mmMME1_RTR_HBW_WR_RS_N_ARB 0x40198
+
+#define mmMME1_RTR_HBW_WR_RS_S_ARB 0x4019C
+
+#define mmMME1_RTR_HBW_WR_RS_L_ARB 0x401A0
+
+#define mmMME1_RTR_LBW_RD_RQ_E_ARB 0x40200
+
+#define mmMME1_RTR_LBW_RD_RQ_W_ARB 0x40204
+
+#define mmMME1_RTR_LBW_RD_RQ_N_ARB 0x40208
+
+#define mmMME1_RTR_LBW_RD_RQ_S_ARB 0x4020C
+
+#define mmMME1_RTR_LBW_RD_RQ_L_ARB 0x40210
+
+#define mmMME1_RTR_LBW_E_ARB_MAX 0x40220
+
+#define mmMME1_RTR_LBW_W_ARB_MAX 0x40224
+
+#define mmMME1_RTR_LBW_N_ARB_MAX 0x40228
+
+#define mmMME1_RTR_LBW_S_ARB_MAX 0x4022C
+
+#define mmMME1_RTR_LBW_L_ARB_MAX 0x40230
+
+#define mmMME1_RTR_LBW_SRAM_MAX_CREDIT 0x40240
+
+#define mmMME1_RTR_LBW_RD_RS_E_ARB 0x40250
+
+#define mmMME1_RTR_LBW_RD_RS_W_ARB 0x40254
+
+#define mmMME1_RTR_LBW_RD_RS_N_ARB 0x40258
+
+#define mmMME1_RTR_LBW_RD_RS_S_ARB 0x4025C
+
+#define mmMME1_RTR_LBW_RD_RS_L_ARB 0x40260
+
+#define mmMME1_RTR_LBW_WR_RQ_E_ARB 0x40270
+
+#define mmMME1_RTR_LBW_WR_RQ_W_ARB 0x40274
+
+#define mmMME1_RTR_LBW_WR_RQ_N_ARB 0x40278
+
+#define mmMME1_RTR_LBW_WR_RQ_S_ARB 0x4027C
+
+#define mmMME1_RTR_LBW_WR_RQ_L_ARB 0x40280
+
+#define mmMME1_RTR_LBW_WR_RS_E_ARB 0x40290
+
+#define mmMME1_RTR_LBW_WR_RS_W_ARB 0x40294
+
+#define mmMME1_RTR_LBW_WR_RS_N_ARB 0x40298
+
+#define mmMME1_RTR_LBW_WR_RS_S_ARB 0x4029C
+
+#define mmMME1_RTR_LBW_WR_RS_L_ARB 0x402A0
+
+#define mmMME1_RTR_DBG_E_ARB 0x40300
+
+#define mmMME1_RTR_DBG_W_ARB 0x40304
+
+#define mmMME1_RTR_DBG_N_ARB 0x40308
+
+#define mmMME1_RTR_DBG_S_ARB 0x4030C
+
+#define mmMME1_RTR_DBG_L_ARB 0x40310
+
+#define mmMME1_RTR_DBG_E_ARB_MAX 0x40320
+
+#define mmMME1_RTR_DBG_W_ARB_MAX 0x40324
+
+#define mmMME1_RTR_DBG_N_ARB_MAX 0x40328
+
+#define mmMME1_RTR_DBG_S_ARB_MAX 0x4032C
+
+#define mmMME1_RTR_DBG_L_ARB_MAX 0x40330
+
+#define mmMME1_RTR_SPLIT_COEF_0 0x40400
+
+#define mmMME1_RTR_SPLIT_COEF_1 0x40404
+
+#define mmMME1_RTR_SPLIT_COEF_2 0x40408
+
+#define mmMME1_RTR_SPLIT_COEF_3 0x4040C
+
+#define mmMME1_RTR_SPLIT_COEF_4 0x40410
+
+#define mmMME1_RTR_SPLIT_COEF_5 0x40414
+
+#define mmMME1_RTR_SPLIT_COEF_6 0x40418
+
+#define mmMME1_RTR_SPLIT_COEF_7 0x4041C
+
+#define mmMME1_RTR_SPLIT_COEF_8 0x40420
+
+#define mmMME1_RTR_SPLIT_COEF_9 0x40424
+
+#define mmMME1_RTR_SPLIT_CFG 0x40440
+
+#define mmMME1_RTR_SPLIT_RD_SAT 0x40444
+
+#define mmMME1_RTR_SPLIT_RD_RST_TOKEN 0x40448
+
+#define mmMME1_RTR_SPLIT_RD_TIMEOUT_0 0x4044C
+
+#define mmMME1_RTR_SPLIT_RD_TIMEOUT_1 0x40450
+
+#define mmMME1_RTR_SPLIT_WR_SAT 0x40454
+
+#define mmMME1_RTR_WPLIT_WR_TST_TOLEN 0x40458
+
+#define mmMME1_RTR_SPLIT_WR_TIMEOUT_0 0x4045C
+
+#define mmMME1_RTR_SPLIT_WR_TIMEOUT_1 0x40460
+
+#define mmMME1_RTR_HBW_RANGE_HIT 0x40470
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_0 0x40480
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_1 0x40484
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_2 0x40488
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_3 0x4048C
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_4 0x40490
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_5 0x40494
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_6 0x40498
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_7 0x4049C
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_0 0x404A0
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_1 0x404A4
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_2 0x404A8
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_3 0x404AC
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_4 0x404B0
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_5 0x404B4
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_6 0x404B8
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_7 0x404BC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_0 0x404C0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_1 0x404C4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_2 0x404C8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_3 0x404CC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_4 0x404D0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_5 0x404D4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_6 0x404D8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_7 0x404DC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_0 0x404E0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_1 0x404E4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_2 0x404E8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_3 0x404EC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_4 0x404F0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_5 0x404F4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_6 0x404F8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_7 0x404FC
+
+#define mmMME1_RTR_LBW_RANGE_HIT 0x40500
+
+#define mmMME1_RTR_LBW_RANGE_MASK_0 0x40510
+
+#define mmMME1_RTR_LBW_RANGE_MASK_1 0x40514
+
+#define mmMME1_RTR_LBW_RANGE_MASK_2 0x40518
+
+#define mmMME1_RTR_LBW_RANGE_MASK_3 0x4051C
+
+#define mmMME1_RTR_LBW_RANGE_MASK_4 0x40520
+
+#define mmMME1_RTR_LBW_RANGE_MASK_5 0x40524
+
+#define mmMME1_RTR_LBW_RANGE_MASK_6 0x40528
+
+#define mmMME1_RTR_LBW_RANGE_MASK_7 0x4052C
+
+#define mmMME1_RTR_LBW_RANGE_MASK_8 0x40530
+
+#define mmMME1_RTR_LBW_RANGE_MASK_9 0x40534
+
+#define mmMME1_RTR_LBW_RANGE_MASK_10 0x40538
+
+#define mmMME1_RTR_LBW_RANGE_MASK_11 0x4053C
+
+#define mmMME1_RTR_LBW_RANGE_MASK_12 0x40540
+
+#define mmMME1_RTR_LBW_RANGE_MASK_13 0x40544
+
+#define mmMME1_RTR_LBW_RANGE_MASK_14 0x40548
+
+#define mmMME1_RTR_LBW_RANGE_MASK_15 0x4054C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_0 0x40550
+
+#define mmMME1_RTR_LBW_RANGE_BASE_1 0x40554
+
+#define mmMME1_RTR_LBW_RANGE_BASE_2 0x40558
+
+#define mmMME1_RTR_LBW_RANGE_BASE_3 0x4055C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_4 0x40560
+
+#define mmMME1_RTR_LBW_RANGE_BASE_5 0x40564
+
+#define mmMME1_RTR_LBW_RANGE_BASE_6 0x40568
+
+#define mmMME1_RTR_LBW_RANGE_BASE_7 0x4056C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_8 0x40570
+
+#define mmMME1_RTR_LBW_RANGE_BASE_9 0x40574
+
+#define mmMME1_RTR_LBW_RANGE_BASE_10 0x40578
+
+#define mmMME1_RTR_LBW_RANGE_BASE_11 0x4057C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_12 0x40580
+
+#define mmMME1_RTR_LBW_RANGE_BASE_13 0x40584
+
+#define mmMME1_RTR_LBW_RANGE_BASE_14 0x40588
+
+#define mmMME1_RTR_LBW_RANGE_BASE_15 0x4058C
+
+#define mmMME1_RTR_RGLTR 0x40590
+
+#define mmMME1_RTR_RGLTR_WR_RESULT 0x40594
+
+#define mmMME1_RTR_RGLTR_RD_RESULT 0x40598
+
+#define mmMME1_RTR_SCRAMB_EN 0x40600
+
+#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604
+
+#endif /* ASIC_REG_MME1_RTR_REGS_H_ */
+
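The two generated headers above carry only raw offsets (mmMME1_RTR_*) and field SHIFT/MASK pairs; nothing in this hunk shows them in use. As a minimal illustrative sketch (not part of this diff), the usual read-modify-write idiom for these definitions would look roughly like the code below, assuming the driver's RREG32()/WREG32() MMIO accessors and struct hl_device from elsewhere in the series; the function name is hypothetical.

/*
 * Illustrative only -- not part of the patch. Demonstrates the
 * SHIFT/MASK idiom for the auto-generated definitions above, assuming
 * RREG32()/WREG32() (or equivalent MMIO helpers) are available in the
 * driver context that includes goya_regs.h.
 */
static void mme1_rtr_enable_rd_rate_limit(struct hl_device *hdev)
{
	u32 split_cfg = RREG32(mmMME1_RTR_SPLIT_CFG);

	/* Clear the field, then program it through its SHIFT/MASK pair */
	split_cfg &= ~MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK;
	split_cfg |= 1 << MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT;

	WREG32(mmMME1_RTR_SPLIT_CFG, split_cfg);
}

The same pattern applies to every *_ARB, *_MAX_CREDIT and range field defined in mme1_rtr_masks.h.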
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
new file mode 100644
index 000000000000..7a2b777bdc4f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME2_RTR_REGS_H_
+#define ASIC_REG_MME2_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME2_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME2_RTR_HBW_RD_RQ_E_ARB 0x80100
+
+#define mmMME2_RTR_HBW_RD_RQ_W_ARB 0x80104
+
+#define mmMME2_RTR_HBW_RD_RQ_N_ARB 0x80108
+
+#define mmMME2_RTR_HBW_RD_RQ_S_ARB 0x8010C
+
+#define mmMME2_RTR_HBW_RD_RQ_L_ARB 0x80110
+
+#define mmMME2_RTR_HBW_E_ARB_MAX 0x80120
+
+#define mmMME2_RTR_HBW_W_ARB_MAX 0x80124
+
+#define mmMME2_RTR_HBW_N_ARB_MAX 0x80128
+
+#define mmMME2_RTR_HBW_S_ARB_MAX 0x8012C
+
+#define mmMME2_RTR_HBW_L_ARB_MAX 0x80130
+
+#define mmMME2_RTR_HBW_RD_RS_MAX_CREDIT 0x80140
+
+#define mmMME2_RTR_HBW_WR_RQ_MAX_CREDIT 0x80144
+
+#define mmMME2_RTR_HBW_RD_RQ_MAX_CREDIT 0x80148
+
+#define mmMME2_RTR_HBW_RD_RS_E_ARB 0x80150
+
+#define mmMME2_RTR_HBW_RD_RS_W_ARB 0x80154
+
+#define mmMME2_RTR_HBW_RD_RS_N_ARB 0x80158
+
+#define mmMME2_RTR_HBW_RD_RS_S_ARB 0x8015C
+
+#define mmMME2_RTR_HBW_RD_RS_L_ARB 0x80160
+
+#define mmMME2_RTR_HBW_WR_RQ_E_ARB 0x80170
+
+#define mmMME2_RTR_HBW_WR_RQ_W_ARB 0x80174
+
+#define mmMME2_RTR_HBW_WR_RQ_N_ARB 0x80178
+
+#define mmMME2_RTR_HBW_WR_RQ_S_ARB 0x8017C
+
+#define mmMME2_RTR_HBW_WR_RQ_L_ARB 0x80180
+
+#define mmMME2_RTR_HBW_WR_RS_E_ARB 0x80190
+
+#define mmMME2_RTR_HBW_WR_RS_W_ARB 0x80194
+
+#define mmMME2_RTR_HBW_WR_RS_N_ARB 0x80198
+
+#define mmMME2_RTR_HBW_WR_RS_S_ARB 0x8019C
+
+#define mmMME2_RTR_HBW_WR_RS_L_ARB 0x801A0
+
+#define mmMME2_RTR_LBW_RD_RQ_E_ARB 0x80200
+
+#define mmMME2_RTR_LBW_RD_RQ_W_ARB 0x80204
+
+#define mmMME2_RTR_LBW_RD_RQ_N_ARB 0x80208
+
+#define mmMME2_RTR_LBW_RD_RQ_S_ARB 0x8020C
+
+#define mmMME2_RTR_LBW_RD_RQ_L_ARB 0x80210
+
+#define mmMME2_RTR_LBW_E_ARB_MAX 0x80220
+
+#define mmMME2_RTR_LBW_W_ARB_MAX 0x80224
+
+#define mmMME2_RTR_LBW_N_ARB_MAX 0x80228
+
+#define mmMME2_RTR_LBW_S_ARB_MAX 0x8022C
+
+#define mmMME2_RTR_LBW_L_ARB_MAX 0x80230
+
+#define mmMME2_RTR_LBW_SRAM_MAX_CREDIT 0x80240
+
+#define mmMME2_RTR_LBW_RD_RS_E_ARB 0x80250
+
+#define mmMME2_RTR_LBW_RD_RS_W_ARB 0x80254
+
+#define mmMME2_RTR_LBW_RD_RS_N_ARB 0x80258
+
+#define mmMME2_RTR_LBW_RD_RS_S_ARB 0x8025C
+
+#define mmMME2_RTR_LBW_RD_RS_L_ARB 0x80260
+
+#define mmMME2_RTR_LBW_WR_RQ_E_ARB 0x80270
+
+#define mmMME2_RTR_LBW_WR_RQ_W_ARB 0x80274
+
+#define mmMME2_RTR_LBW_WR_RQ_N_ARB 0x80278
+
+#define mmMME2_RTR_LBW_WR_RQ_S_ARB 0x8027C
+
+#define mmMME2_RTR_LBW_WR_RQ_L_ARB 0x80280
+
+#define mmMME2_RTR_LBW_WR_RS_E_ARB 0x80290
+
+#define mmMME2_RTR_LBW_WR_RS_W_ARB 0x80294
+
+#define mmMME2_RTR_LBW_WR_RS_N_ARB 0x80298
+
+#define mmMME2_RTR_LBW_WR_RS_S_ARB 0x8029C
+
+#define mmMME2_RTR_LBW_WR_RS_L_ARB 0x802A0
+
+#define mmMME2_RTR_DBG_E_ARB 0x80300
+
+#define mmMME2_RTR_DBG_W_ARB 0x80304
+
+#define mmMME2_RTR_DBG_N_ARB 0x80308
+
+#define mmMME2_RTR_DBG_S_ARB 0x8030C
+
+#define mmMME2_RTR_DBG_L_ARB 0x80310
+
+#define mmMME2_RTR_DBG_E_ARB_MAX 0x80320
+
+#define mmMME2_RTR_DBG_W_ARB_MAX 0x80324
+
+#define mmMME2_RTR_DBG_N_ARB_MAX 0x80328
+
+#define mmMME2_RTR_DBG_S_ARB_MAX 0x8032C
+
+#define mmMME2_RTR_DBG_L_ARB_MAX 0x80330
+
+#define mmMME2_RTR_SPLIT_COEF_0 0x80400
+
+#define mmMME2_RTR_SPLIT_COEF_1 0x80404
+
+#define mmMME2_RTR_SPLIT_COEF_2 0x80408
+
+#define mmMME2_RTR_SPLIT_COEF_3 0x8040C
+
+#define mmMME2_RTR_SPLIT_COEF_4 0x80410
+
+#define mmMME2_RTR_SPLIT_COEF_5 0x80414
+
+#define mmMME2_RTR_SPLIT_COEF_6 0x80418
+
+#define mmMME2_RTR_SPLIT_COEF_7 0x8041C
+
+#define mmMME2_RTR_SPLIT_COEF_8 0x80420
+
+#define mmMME2_RTR_SPLIT_COEF_9 0x80424
+
+#define mmMME2_RTR_SPLIT_CFG 0x80440
+
+#define mmMME2_RTR_SPLIT_RD_SAT 0x80444
+
+#define mmMME2_RTR_SPLIT_RD_RST_TOKEN 0x80448
+
+#define mmMME2_RTR_SPLIT_RD_TIMEOUT_0 0x8044C
+
+#define mmMME2_RTR_SPLIT_RD_TIMEOUT_1 0x80450
+
+#define mmMME2_RTR_SPLIT_WR_SAT 0x80454
+
+#define mmMME2_RTR_WPLIT_WR_TST_TOLEN 0x80458
+
+#define mmMME2_RTR_SPLIT_WR_TIMEOUT_0 0x8045C
+
+#define mmMME2_RTR_SPLIT_WR_TIMEOUT_1 0x80460
+
+#define mmMME2_RTR_HBW_RANGE_HIT 0x80470
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_0 0x80480
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_1 0x80484
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_2 0x80488
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_3 0x8048C
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_4 0x80490
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_5 0x80494
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_6 0x80498
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_7 0x8049C
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_0 0x804A0
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_1 0x804A4
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_2 0x804A8
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_3 0x804AC
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_4 0x804B0
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_5 0x804B4
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_6 0x804B8
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_7 0x804BC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_0 0x804C0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_1 0x804C4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_2 0x804C8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_3 0x804CC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_4 0x804D0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_5 0x804D4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_6 0x804D8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_7 0x804DC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_0 0x804E0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_1 0x804E4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_2 0x804E8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_3 0x804EC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_4 0x804F0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_5 0x804F4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_6 0x804F8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_7 0x804FC
+
+#define mmMME2_RTR_LBW_RANGE_HIT 0x80500
+
+#define mmMME2_RTR_LBW_RANGE_MASK_0 0x80510
+
+#define mmMME2_RTR_LBW_RANGE_MASK_1 0x80514
+
+#define mmMME2_RTR_LBW_RANGE_MASK_2 0x80518
+
+#define mmMME2_RTR_LBW_RANGE_MASK_3 0x8051C
+
+#define mmMME2_RTR_LBW_RANGE_MASK_4 0x80520
+
+#define mmMME2_RTR_LBW_RANGE_MASK_5 0x80524
+
+#define mmMME2_RTR_LBW_RANGE_MASK_6 0x80528
+
+#define mmMME2_RTR_LBW_RANGE_MASK_7 0x8052C
+
+#define mmMME2_RTR_LBW_RANGE_MASK_8 0x80530
+
+#define mmMME2_RTR_LBW_RANGE_MASK_9 0x80534
+
+#define mmMME2_RTR_LBW_RANGE_MASK_10 0x80538
+
+#define mmMME2_RTR_LBW_RANGE_MASK_11 0x8053C
+
+#define mmMME2_RTR_LBW_RANGE_MASK_12 0x80540
+
+#define mmMME2_RTR_LBW_RANGE_MASK_13 0x80544
+
+#define mmMME2_RTR_LBW_RANGE_MASK_14 0x80548
+
+#define mmMME2_RTR_LBW_RANGE_MASK_15 0x8054C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_0 0x80550
+
+#define mmMME2_RTR_LBW_RANGE_BASE_1 0x80554
+
+#define mmMME2_RTR_LBW_RANGE_BASE_2 0x80558
+
+#define mmMME2_RTR_LBW_RANGE_BASE_3 0x8055C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_4 0x80560
+
+#define mmMME2_RTR_LBW_RANGE_BASE_5 0x80564
+
+#define mmMME2_RTR_LBW_RANGE_BASE_6 0x80568
+
+#define mmMME2_RTR_LBW_RANGE_BASE_7 0x8056C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_8 0x80570
+
+#define mmMME2_RTR_LBW_RANGE_BASE_9 0x80574
+
+#define mmMME2_RTR_LBW_RANGE_BASE_10 0x80578
+
+#define mmMME2_RTR_LBW_RANGE_BASE_11 0x8057C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_12 0x80580
+
+#define mmMME2_RTR_LBW_RANGE_BASE_13 0x80584
+
+#define mmMME2_RTR_LBW_RANGE_BASE_14 0x80588
+
+#define mmMME2_RTR_LBW_RANGE_BASE_15 0x8058C
+
+#define mmMME2_RTR_RGLTR 0x80590
+
+#define mmMME2_RTR_RGLTR_WR_RESULT 0x80594
+
+#define mmMME2_RTR_RGLTR_RD_RESULT 0x80598
+
+#define mmMME2_RTR_SCRAMB_EN 0x80600
+
+#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604
+
+#endif /* ASIC_REG_MME2_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
new file mode 100644
index 000000000000..b78f8bc387fc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME3_RTR_REGS_H_
+#define ASIC_REG_MME3_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME3_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME3_RTR_HBW_RD_RQ_E_ARB 0xC0100
+
+#define mmMME3_RTR_HBW_RD_RQ_W_ARB 0xC0104
+
+#define mmMME3_RTR_HBW_RD_RQ_N_ARB 0xC0108
+
+#define mmMME3_RTR_HBW_RD_RQ_S_ARB 0xC010C
+
+#define mmMME3_RTR_HBW_RD_RQ_L_ARB 0xC0110
+
+#define mmMME3_RTR_HBW_E_ARB_MAX 0xC0120
+
+#define mmMME3_RTR_HBW_W_ARB_MAX 0xC0124
+
+#define mmMME3_RTR_HBW_N_ARB_MAX 0xC0128
+
+#define mmMME3_RTR_HBW_S_ARB_MAX 0xC012C
+
+#define mmMME3_RTR_HBW_L_ARB_MAX 0xC0130
+
+#define mmMME3_RTR_HBW_RD_RS_MAX_CREDIT 0xC0140
+
+#define mmMME3_RTR_HBW_WR_RQ_MAX_CREDIT 0xC0144
+
+#define mmMME3_RTR_HBW_RD_RQ_MAX_CREDIT 0xC0148
+
+#define mmMME3_RTR_HBW_RD_RS_E_ARB 0xC0150
+
+#define mmMME3_RTR_HBW_RD_RS_W_ARB 0xC0154
+
+#define mmMME3_RTR_HBW_RD_RS_N_ARB 0xC0158
+
+#define mmMME3_RTR_HBW_RD_RS_S_ARB 0xC015C
+
+#define mmMME3_RTR_HBW_RD_RS_L_ARB 0xC0160
+
+#define mmMME3_RTR_HBW_WR_RQ_E_ARB 0xC0170
+
+#define mmMME3_RTR_HBW_WR_RQ_W_ARB 0xC0174
+
+#define mmMME3_RTR_HBW_WR_RQ_N_ARB 0xC0178
+
+#define mmMME3_RTR_HBW_WR_RQ_S_ARB 0xC017C
+
+#define mmMME3_RTR_HBW_WR_RQ_L_ARB 0xC0180
+
+#define mmMME3_RTR_HBW_WR_RS_E_ARB 0xC0190
+
+#define mmMME3_RTR_HBW_WR_RS_W_ARB 0xC0194
+
+#define mmMME3_RTR_HBW_WR_RS_N_ARB 0xC0198
+
+#define mmMME3_RTR_HBW_WR_RS_S_ARB 0xC019C
+
+#define mmMME3_RTR_HBW_WR_RS_L_ARB 0xC01A0
+
+#define mmMME3_RTR_LBW_RD_RQ_E_ARB 0xC0200
+
+#define mmMME3_RTR_LBW_RD_RQ_W_ARB 0xC0204
+
+#define mmMME3_RTR_LBW_RD_RQ_N_ARB 0xC0208
+
+#define mmMME3_RTR_LBW_RD_RQ_S_ARB 0xC020C
+
+#define mmMME3_RTR_LBW_RD_RQ_L_ARB 0xC0210
+
+#define mmMME3_RTR_LBW_E_ARB_MAX 0xC0220
+
+#define mmMME3_RTR_LBW_W_ARB_MAX 0xC0224
+
+#define mmMME3_RTR_LBW_N_ARB_MAX 0xC0228
+
+#define mmMME3_RTR_LBW_S_ARB_MAX 0xC022C
+
+#define mmMME3_RTR_LBW_L_ARB_MAX 0xC0230
+
+#define mmMME3_RTR_LBW_SRAM_MAX_CREDIT 0xC0240
+
+#define mmMME3_RTR_LBW_RD_RS_E_ARB 0xC0250
+
+#define mmMME3_RTR_LBW_RD_RS_W_ARB 0xC0254
+
+#define mmMME3_RTR_LBW_RD_RS_N_ARB 0xC0258
+
+#define mmMME3_RTR_LBW_RD_RS_S_ARB 0xC025C
+
+#define mmMME3_RTR_LBW_RD_RS_L_ARB 0xC0260
+
+#define mmMME3_RTR_LBW_WR_RQ_E_ARB 0xC0270
+
+#define mmMME3_RTR_LBW_WR_RQ_W_ARB 0xC0274
+
+#define mmMME3_RTR_LBW_WR_RQ_N_ARB 0xC0278
+
+#define mmMME3_RTR_LBW_WR_RQ_S_ARB 0xC027C
+
+#define mmMME3_RTR_LBW_WR_RQ_L_ARB 0xC0280
+
+#define mmMME3_RTR_LBW_WR_RS_E_ARB 0xC0290
+
+#define mmMME3_RTR_LBW_WR_RS_W_ARB 0xC0294
+
+#define mmMME3_RTR_LBW_WR_RS_N_ARB 0xC0298
+
+#define mmMME3_RTR_LBW_WR_RS_S_ARB 0xC029C
+
+#define mmMME3_RTR_LBW_WR_RS_L_ARB 0xC02A0
+
+#define mmMME3_RTR_DBG_E_ARB 0xC0300
+
+#define mmMME3_RTR_DBG_W_ARB 0xC0304
+
+#define mmMME3_RTR_DBG_N_ARB 0xC0308
+
+#define mmMME3_RTR_DBG_S_ARB 0xC030C
+
+#define mmMME3_RTR_DBG_L_ARB 0xC0310
+
+#define mmMME3_RTR_DBG_E_ARB_MAX 0xC0320
+
+#define mmMME3_RTR_DBG_W_ARB_MAX 0xC0324
+
+#define mmMME3_RTR_DBG_N_ARB_MAX 0xC0328
+
+#define mmMME3_RTR_DBG_S_ARB_MAX 0xC032C
+
+#define mmMME3_RTR_DBG_L_ARB_MAX 0xC0330
+
+#define mmMME3_RTR_SPLIT_COEF_0 0xC0400
+
+#define mmMME3_RTR_SPLIT_COEF_1 0xC0404
+
+#define mmMME3_RTR_SPLIT_COEF_2 0xC0408
+
+#define mmMME3_RTR_SPLIT_COEF_3 0xC040C
+
+#define mmMME3_RTR_SPLIT_COEF_4 0xC0410
+
+#define mmMME3_RTR_SPLIT_COEF_5 0xC0414
+
+#define mmMME3_RTR_SPLIT_COEF_6 0xC0418
+
+#define mmMME3_RTR_SPLIT_COEF_7 0xC041C
+
+#define mmMME3_RTR_SPLIT_COEF_8 0xC0420
+
+#define mmMME3_RTR_SPLIT_COEF_9 0xC0424
+
+#define mmMME3_RTR_SPLIT_CFG 0xC0440
+
+#define mmMME3_RTR_SPLIT_RD_SAT 0xC0444
+
+#define mmMME3_RTR_SPLIT_RD_RST_TOKEN 0xC0448
+
+#define mmMME3_RTR_SPLIT_RD_TIMEOUT_0 0xC044C
+
+#define mmMME3_RTR_SPLIT_RD_TIMEOUT_1 0xC0450
+
+#define mmMME3_RTR_SPLIT_WR_SAT 0xC0454
+
+#define mmMME3_RTR_WPLIT_WR_TST_TOLEN 0xC0458
+
+#define mmMME3_RTR_SPLIT_WR_TIMEOUT_0 0xC045C
+
+#define mmMME3_RTR_SPLIT_WR_TIMEOUT_1 0xC0460
+
+#define mmMME3_RTR_HBW_RANGE_HIT 0xC0470
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_0 0xC0480
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_1 0xC0484
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_2 0xC0488
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_3 0xC048C
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_4 0xC0490
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_5 0xC0494
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_6 0xC0498
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_7 0xC049C
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_0 0xC04A0
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_1 0xC04A4
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_2 0xC04A8
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_3 0xC04AC
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_4 0xC04B0
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_5 0xC04B4
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_6 0xC04B8
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_7 0xC04BC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_0 0xC04C0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_1 0xC04C4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_2 0xC04C8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_3 0xC04CC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_4 0xC04D0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_5 0xC04D4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_6 0xC04D8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_7 0xC04DC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_0 0xC04E0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_1 0xC04E4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_2 0xC04E8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_3 0xC04EC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_4 0xC04F0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_5 0xC04F4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_6 0xC04F8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_7 0xC04FC
+
+#define mmMME3_RTR_LBW_RANGE_HIT 0xC0500
+
+#define mmMME3_RTR_LBW_RANGE_MASK_0 0xC0510
+
+#define mmMME3_RTR_LBW_RANGE_MASK_1 0xC0514
+
+#define mmMME3_RTR_LBW_RANGE_MASK_2 0xC0518
+
+#define mmMME3_RTR_LBW_RANGE_MASK_3 0xC051C
+
+#define mmMME3_RTR_LBW_RANGE_MASK_4 0xC0520
+
+#define mmMME3_RTR_LBW_RANGE_MASK_5 0xC0524
+
+#define mmMME3_RTR_LBW_RANGE_MASK_6 0xC0528
+
+#define mmMME3_RTR_LBW_RANGE_MASK_7 0xC052C
+
+#define mmMME3_RTR_LBW_RANGE_MASK_8 0xC0530
+
+#define mmMME3_RTR_LBW_RANGE_MASK_9 0xC0534
+
+#define mmMME3_RTR_LBW_RANGE_MASK_10 0xC0538
+
+#define mmMME3_RTR_LBW_RANGE_MASK_11 0xC053C
+
+#define mmMME3_RTR_LBW_RANGE_MASK_12 0xC0540
+
+#define mmMME3_RTR_LBW_RANGE_MASK_13 0xC0544
+
+#define mmMME3_RTR_LBW_RANGE_MASK_14 0xC0548
+
+#define mmMME3_RTR_LBW_RANGE_MASK_15 0xC054C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_0 0xC0550
+
+#define mmMME3_RTR_LBW_RANGE_BASE_1 0xC0554
+
+#define mmMME3_RTR_LBW_RANGE_BASE_2 0xC0558
+
+#define mmMME3_RTR_LBW_RANGE_BASE_3 0xC055C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_4 0xC0560
+
+#define mmMME3_RTR_LBW_RANGE_BASE_5 0xC0564
+
+#define mmMME3_RTR_LBW_RANGE_BASE_6 0xC0568
+
+#define mmMME3_RTR_LBW_RANGE_BASE_7 0xC056C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_8 0xC0570
+
+#define mmMME3_RTR_LBW_RANGE_BASE_9 0xC0574
+
+#define mmMME3_RTR_LBW_RANGE_BASE_10 0xC0578
+
+#define mmMME3_RTR_LBW_RANGE_BASE_11 0xC057C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_12 0xC0580
+
+#define mmMME3_RTR_LBW_RANGE_BASE_13 0xC0584
+
+#define mmMME3_RTR_LBW_RANGE_BASE_14 0xC0588
+
+#define mmMME3_RTR_LBW_RANGE_BASE_15 0xC058C
+
+#define mmMME3_RTR_RGLTR 0xC0590
+
+#define mmMME3_RTR_RGLTR_WR_RESULT 0xC0594
+
+#define mmMME3_RTR_RGLTR_RD_RESULT 0xC0598
+
+#define mmMME3_RTR_SCRAMB_EN 0xC0600
+
+#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604
+
+#endif /* ASIC_REG_MME3_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
new file mode 100644
index 000000000000..d9a4a02cefa3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME4_RTR_REGS_H_
+#define ASIC_REG_MME4_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME4_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME4_RTR_HBW_RD_RQ_E_ARB 0x100100
+
+#define mmMME4_RTR_HBW_RD_RQ_W_ARB 0x100104
+
+#define mmMME4_RTR_HBW_RD_RQ_N_ARB 0x100108
+
+#define mmMME4_RTR_HBW_RD_RQ_S_ARB 0x10010C
+
+#define mmMME4_RTR_HBW_RD_RQ_L_ARB 0x100110
+
+#define mmMME4_RTR_HBW_E_ARB_MAX 0x100120
+
+#define mmMME4_RTR_HBW_W_ARB_MAX 0x100124
+
+#define mmMME4_RTR_HBW_N_ARB_MAX 0x100128
+
+#define mmMME4_RTR_HBW_S_ARB_MAX 0x10012C
+
+#define mmMME4_RTR_HBW_L_ARB_MAX 0x100130
+
+#define mmMME4_RTR_HBW_RD_RS_MAX_CREDIT 0x100140
+
+#define mmMME4_RTR_HBW_WR_RQ_MAX_CREDIT 0x100144
+
+#define mmMME4_RTR_HBW_RD_RQ_MAX_CREDIT 0x100148
+
+#define mmMME4_RTR_HBW_RD_RS_E_ARB 0x100150
+
+#define mmMME4_RTR_HBW_RD_RS_W_ARB 0x100154
+
+#define mmMME4_RTR_HBW_RD_RS_N_ARB 0x100158
+
+#define mmMME4_RTR_HBW_RD_RS_S_ARB 0x10015C
+
+#define mmMME4_RTR_HBW_RD_RS_L_ARB 0x100160
+
+#define mmMME4_RTR_HBW_WR_RQ_E_ARB 0x100170
+
+#define mmMME4_RTR_HBW_WR_RQ_W_ARB 0x100174
+
+#define mmMME4_RTR_HBW_WR_RQ_N_ARB 0x100178
+
+#define mmMME4_RTR_HBW_WR_RQ_S_ARB 0x10017C
+
+#define mmMME4_RTR_HBW_WR_RQ_L_ARB 0x100180
+
+#define mmMME4_RTR_HBW_WR_RS_E_ARB 0x100190
+
+#define mmMME4_RTR_HBW_WR_RS_W_ARB 0x100194
+
+#define mmMME4_RTR_HBW_WR_RS_N_ARB 0x100198
+
+#define mmMME4_RTR_HBW_WR_RS_S_ARB 0x10019C
+
+#define mmMME4_RTR_HBW_WR_RS_L_ARB 0x1001A0
+
+#define mmMME4_RTR_LBW_RD_RQ_E_ARB 0x100200
+
+#define mmMME4_RTR_LBW_RD_RQ_W_ARB 0x100204
+
+#define mmMME4_RTR_LBW_RD_RQ_N_ARB 0x100208
+
+#define mmMME4_RTR_LBW_RD_RQ_S_ARB 0x10020C
+
+#define mmMME4_RTR_LBW_RD_RQ_L_ARB 0x100210
+
+#define mmMME4_RTR_LBW_E_ARB_MAX 0x100220
+
+#define mmMME4_RTR_LBW_W_ARB_MAX 0x100224
+
+#define mmMME4_RTR_LBW_N_ARB_MAX 0x100228
+
+#define mmMME4_RTR_LBW_S_ARB_MAX 0x10022C
+
+#define mmMME4_RTR_LBW_L_ARB_MAX 0x100230
+
+#define mmMME4_RTR_LBW_SRAM_MAX_CREDIT 0x100240
+
+#define mmMME4_RTR_LBW_RD_RS_E_ARB 0x100250
+
+#define mmMME4_RTR_LBW_RD_RS_W_ARB 0x100254
+
+#define mmMME4_RTR_LBW_RD_RS_N_ARB 0x100258
+
+#define mmMME4_RTR_LBW_RD_RS_S_ARB 0x10025C
+
+#define mmMME4_RTR_LBW_RD_RS_L_ARB 0x100260
+
+#define mmMME4_RTR_LBW_WR_RQ_E_ARB 0x100270
+
+#define mmMME4_RTR_LBW_WR_RQ_W_ARB 0x100274
+
+#define mmMME4_RTR_LBW_WR_RQ_N_ARB 0x100278
+
+#define mmMME4_RTR_LBW_WR_RQ_S_ARB 0x10027C
+
+#define mmMME4_RTR_LBW_WR_RQ_L_ARB 0x100280
+
+#define mmMME4_RTR_LBW_WR_RS_E_ARB 0x100290
+
+#define mmMME4_RTR_LBW_WR_RS_W_ARB 0x100294
+
+#define mmMME4_RTR_LBW_WR_RS_N_ARB 0x100298
+
+#define mmMME4_RTR_LBW_WR_RS_S_ARB 0x10029C
+
+#define mmMME4_RTR_LBW_WR_RS_L_ARB 0x1002A0
+
+#define mmMME4_RTR_DBG_E_ARB 0x100300
+
+#define mmMME4_RTR_DBG_W_ARB 0x100304
+
+#define mmMME4_RTR_DBG_N_ARB 0x100308
+
+#define mmMME4_RTR_DBG_S_ARB 0x10030C
+
+#define mmMME4_RTR_DBG_L_ARB 0x100310
+
+#define mmMME4_RTR_DBG_E_ARB_MAX 0x100320
+
+#define mmMME4_RTR_DBG_W_ARB_MAX 0x100324
+
+#define mmMME4_RTR_DBG_N_ARB_MAX 0x100328
+
+#define mmMME4_RTR_DBG_S_ARB_MAX 0x10032C
+
+#define mmMME4_RTR_DBG_L_ARB_MAX 0x100330
+
+#define mmMME4_RTR_SPLIT_COEF_0 0x100400
+
+#define mmMME4_RTR_SPLIT_COEF_1 0x100404
+
+#define mmMME4_RTR_SPLIT_COEF_2 0x100408
+
+#define mmMME4_RTR_SPLIT_COEF_3 0x10040C
+
+#define mmMME4_RTR_SPLIT_COEF_4 0x100410
+
+#define mmMME4_RTR_SPLIT_COEF_5 0x100414
+
+#define mmMME4_RTR_SPLIT_COEF_6 0x100418
+
+#define mmMME4_RTR_SPLIT_COEF_7 0x10041C
+
+#define mmMME4_RTR_SPLIT_COEF_8 0x100420
+
+#define mmMME4_RTR_SPLIT_COEF_9 0x100424
+
+#define mmMME4_RTR_SPLIT_CFG 0x100440
+
+#define mmMME4_RTR_SPLIT_RD_SAT 0x100444
+
+#define mmMME4_RTR_SPLIT_RD_RST_TOKEN 0x100448
+
+#define mmMME4_RTR_SPLIT_RD_TIMEOUT_0 0x10044C
+
+#define mmMME4_RTR_SPLIT_RD_TIMEOUT_1 0x100450
+
+#define mmMME4_RTR_SPLIT_WR_SAT 0x100454
+
+#define mmMME4_RTR_WPLIT_WR_TST_TOLEN 0x100458
+
+#define mmMME4_RTR_SPLIT_WR_TIMEOUT_0 0x10045C
+
+#define mmMME4_RTR_SPLIT_WR_TIMEOUT_1 0x100460
+
+#define mmMME4_RTR_HBW_RANGE_HIT 0x100470
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_0 0x100480
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_1 0x100484
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_2 0x100488
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_3 0x10048C
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_4 0x100490
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_5 0x100494
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_6 0x100498
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_7 0x10049C
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_0 0x1004A0
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_1 0x1004A4
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_2 0x1004A8
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_3 0x1004AC
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_4 0x1004B0
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_5 0x1004B4
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_6 0x1004B8
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_7 0x1004BC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_0 0x1004C0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_1 0x1004C4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_2 0x1004C8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_3 0x1004CC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_4 0x1004D0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_5 0x1004D4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_6 0x1004D8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_7 0x1004DC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_0 0x1004E0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_1 0x1004E4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_2 0x1004E8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_3 0x1004EC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_4 0x1004F0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_5 0x1004F4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_6 0x1004F8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_7 0x1004FC
+
+#define mmMME4_RTR_LBW_RANGE_HIT 0x100500
+
+#define mmMME4_RTR_LBW_RANGE_MASK_0 0x100510
+
+#define mmMME4_RTR_LBW_RANGE_MASK_1 0x100514
+
+#define mmMME4_RTR_LBW_RANGE_MASK_2 0x100518
+
+#define mmMME4_RTR_LBW_RANGE_MASK_3 0x10051C
+
+#define mmMME4_RTR_LBW_RANGE_MASK_4 0x100520
+
+#define mmMME4_RTR_LBW_RANGE_MASK_5 0x100524
+
+#define mmMME4_RTR_LBW_RANGE_MASK_6 0x100528
+
+#define mmMME4_RTR_LBW_RANGE_MASK_7 0x10052C
+
+#define mmMME4_RTR_LBW_RANGE_MASK_8 0x100530
+
+#define mmMME4_RTR_LBW_RANGE_MASK_9 0x100534
+
+#define mmMME4_RTR_LBW_RANGE_MASK_10 0x100538
+
+#define mmMME4_RTR_LBW_RANGE_MASK_11 0x10053C
+
+#define mmMME4_RTR_LBW_RANGE_MASK_12 0x100540
+
+#define mmMME4_RTR_LBW_RANGE_MASK_13 0x100544
+
+#define mmMME4_RTR_LBW_RANGE_MASK_14 0x100548
+
+#define mmMME4_RTR_LBW_RANGE_MASK_15 0x10054C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_0 0x100550
+
+#define mmMME4_RTR_LBW_RANGE_BASE_1 0x100554
+
+#define mmMME4_RTR_LBW_RANGE_BASE_2 0x100558
+
+#define mmMME4_RTR_LBW_RANGE_BASE_3 0x10055C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_4 0x100560
+
+#define mmMME4_RTR_LBW_RANGE_BASE_5 0x100564
+
+#define mmMME4_RTR_LBW_RANGE_BASE_6 0x100568
+
+#define mmMME4_RTR_LBW_RANGE_BASE_7 0x10056C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_8 0x100570
+
+#define mmMME4_RTR_LBW_RANGE_BASE_9 0x100574
+
+#define mmMME4_RTR_LBW_RANGE_BASE_10 0x100578
+
+#define mmMME4_RTR_LBW_RANGE_BASE_11 0x10057C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_12 0x100580
+
+#define mmMME4_RTR_LBW_RANGE_BASE_13 0x100584
+
+#define mmMME4_RTR_LBW_RANGE_BASE_14 0x100588
+
+#define mmMME4_RTR_LBW_RANGE_BASE_15 0x10058C
+
+#define mmMME4_RTR_RGLTR 0x100590
+
+#define mmMME4_RTR_RGLTR_WR_RESULT 0x100594
+
+#define mmMME4_RTR_RGLTR_RD_RESULT 0x100598
+
+#define mmMME4_RTR_SCRAMB_EN 0x100600
+
+#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604
+
+#endif /* ASIC_REG_MME4_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
new file mode 100644
index 000000000000..205adc988407
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME5_RTR_REGS_H_
+#define ASIC_REG_MME5_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME5_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME5_RTR_HBW_RD_RQ_E_ARB 0x140100
+
+#define mmMME5_RTR_HBW_RD_RQ_W_ARB 0x140104
+
+#define mmMME5_RTR_HBW_RD_RQ_N_ARB 0x140108
+
+#define mmMME5_RTR_HBW_RD_RQ_S_ARB 0x14010C
+
+#define mmMME5_RTR_HBW_RD_RQ_L_ARB 0x140110
+
+#define mmMME5_RTR_HBW_E_ARB_MAX 0x140120
+
+#define mmMME5_RTR_HBW_W_ARB_MAX 0x140124
+
+#define mmMME5_RTR_HBW_N_ARB_MAX 0x140128
+
+#define mmMME5_RTR_HBW_S_ARB_MAX 0x14012C
+
+#define mmMME5_RTR_HBW_L_ARB_MAX 0x140130
+
+#define mmMME5_RTR_HBW_RD_RS_MAX_CREDIT 0x140140
+
+#define mmMME5_RTR_HBW_WR_RQ_MAX_CREDIT 0x140144
+
+#define mmMME5_RTR_HBW_RD_RQ_MAX_CREDIT 0x140148
+
+#define mmMME5_RTR_HBW_RD_RS_E_ARB 0x140150
+
+#define mmMME5_RTR_HBW_RD_RS_W_ARB 0x140154
+
+#define mmMME5_RTR_HBW_RD_RS_N_ARB 0x140158
+
+#define mmMME5_RTR_HBW_RD_RS_S_ARB 0x14015C
+
+#define mmMME5_RTR_HBW_RD_RS_L_ARB 0x140160
+
+#define mmMME5_RTR_HBW_WR_RQ_E_ARB 0x140170
+
+#define mmMME5_RTR_HBW_WR_RQ_W_ARB 0x140174
+
+#define mmMME5_RTR_HBW_WR_RQ_N_ARB 0x140178
+
+#define mmMME5_RTR_HBW_WR_RQ_S_ARB 0x14017C
+
+#define mmMME5_RTR_HBW_WR_RQ_L_ARB 0x140180
+
+#define mmMME5_RTR_HBW_WR_RS_E_ARB 0x140190
+
+#define mmMME5_RTR_HBW_WR_RS_W_ARB 0x140194
+
+#define mmMME5_RTR_HBW_WR_RS_N_ARB 0x140198
+
+#define mmMME5_RTR_HBW_WR_RS_S_ARB 0x14019C
+
+#define mmMME5_RTR_HBW_WR_RS_L_ARB 0x1401A0
+
+#define mmMME5_RTR_LBW_RD_RQ_E_ARB 0x140200
+
+#define mmMME5_RTR_LBW_RD_RQ_W_ARB 0x140204
+
+#define mmMME5_RTR_LBW_RD_RQ_N_ARB 0x140208
+
+#define mmMME5_RTR_LBW_RD_RQ_S_ARB 0x14020C
+
+#define mmMME5_RTR_LBW_RD_RQ_L_ARB 0x140210
+
+#define mmMME5_RTR_LBW_E_ARB_MAX 0x140220
+
+#define mmMME5_RTR_LBW_W_ARB_MAX 0x140224
+
+#define mmMME5_RTR_LBW_N_ARB_MAX 0x140228
+
+#define mmMME5_RTR_LBW_S_ARB_MAX 0x14022C
+
+#define mmMME5_RTR_LBW_L_ARB_MAX 0x140230
+
+#define mmMME5_RTR_LBW_SRAM_MAX_CREDIT 0x140240
+
+#define mmMME5_RTR_LBW_RD_RS_E_ARB 0x140250
+
+#define mmMME5_RTR_LBW_RD_RS_W_ARB 0x140254
+
+#define mmMME5_RTR_LBW_RD_RS_N_ARB 0x140258
+
+#define mmMME5_RTR_LBW_RD_RS_S_ARB 0x14025C
+
+#define mmMME5_RTR_LBW_RD_RS_L_ARB 0x140260
+
+#define mmMME5_RTR_LBW_WR_RQ_E_ARB 0x140270
+
+#define mmMME5_RTR_LBW_WR_RQ_W_ARB 0x140274
+
+#define mmMME5_RTR_LBW_WR_RQ_N_ARB 0x140278
+
+#define mmMME5_RTR_LBW_WR_RQ_S_ARB 0x14027C
+
+#define mmMME5_RTR_LBW_WR_RQ_L_ARB 0x140280
+
+#define mmMME5_RTR_LBW_WR_RS_E_ARB 0x140290
+
+#define mmMME5_RTR_LBW_WR_RS_W_ARB 0x140294
+
+#define mmMME5_RTR_LBW_WR_RS_N_ARB 0x140298
+
+#define mmMME5_RTR_LBW_WR_RS_S_ARB 0x14029C
+
+#define mmMME5_RTR_LBW_WR_RS_L_ARB 0x1402A0
+
+#define mmMME5_RTR_DBG_E_ARB 0x140300
+
+#define mmMME5_RTR_DBG_W_ARB 0x140304
+
+#define mmMME5_RTR_DBG_N_ARB 0x140308
+
+#define mmMME5_RTR_DBG_S_ARB 0x14030C
+
+#define mmMME5_RTR_DBG_L_ARB 0x140310
+
+#define mmMME5_RTR_DBG_E_ARB_MAX 0x140320
+
+#define mmMME5_RTR_DBG_W_ARB_MAX 0x140324
+
+#define mmMME5_RTR_DBG_N_ARB_MAX 0x140328
+
+#define mmMME5_RTR_DBG_S_ARB_MAX 0x14032C
+
+#define mmMME5_RTR_DBG_L_ARB_MAX 0x140330
+
+#define mmMME5_RTR_SPLIT_COEF_0 0x140400
+
+#define mmMME5_RTR_SPLIT_COEF_1 0x140404
+
+#define mmMME5_RTR_SPLIT_COEF_2 0x140408
+
+#define mmMME5_RTR_SPLIT_COEF_3 0x14040C
+
+#define mmMME5_RTR_SPLIT_COEF_4 0x140410
+
+#define mmMME5_RTR_SPLIT_COEF_5 0x140414
+
+#define mmMME5_RTR_SPLIT_COEF_6 0x140418
+
+#define mmMME5_RTR_SPLIT_COEF_7 0x14041C
+
+#define mmMME5_RTR_SPLIT_COEF_8 0x140420
+
+#define mmMME5_RTR_SPLIT_COEF_9 0x140424
+
+#define mmMME5_RTR_SPLIT_CFG 0x140440
+
+#define mmMME5_RTR_SPLIT_RD_SAT 0x140444
+
+#define mmMME5_RTR_SPLIT_RD_RST_TOKEN 0x140448
+
+#define mmMME5_RTR_SPLIT_RD_TIMEOUT_0 0x14044C
+
+#define mmMME5_RTR_SPLIT_RD_TIMEOUT_1 0x140450
+
+#define mmMME5_RTR_SPLIT_WR_SAT 0x140454
+
+#define mmMME5_RTR_WPLIT_WR_TST_TOLEN 0x140458
+
+#define mmMME5_RTR_SPLIT_WR_TIMEOUT_0 0x14045C
+
+#define mmMME5_RTR_SPLIT_WR_TIMEOUT_1 0x140460
+
+#define mmMME5_RTR_HBW_RANGE_HIT 0x140470
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_0 0x140480
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_1 0x140484
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_2 0x140488
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_3 0x14048C
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_4 0x140490
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_5 0x140494
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_6 0x140498
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_7 0x14049C
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_0 0x1404A0
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_1 0x1404A4
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_2 0x1404A8
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_3 0x1404AC
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_4 0x1404B0
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_5 0x1404B4
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_6 0x1404B8
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_7 0x1404BC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_0 0x1404C0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_1 0x1404C4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_2 0x1404C8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_3 0x1404CC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_4 0x1404D0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_5 0x1404D4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_6 0x1404D8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_7 0x1404DC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_0 0x1404E0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_1 0x1404E4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_2 0x1404E8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_3 0x1404EC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_4 0x1404F0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_5 0x1404F4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_6 0x1404F8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_7 0x1404FC
+
+#define mmMME5_RTR_LBW_RANGE_HIT 0x140500
+
+#define mmMME5_RTR_LBW_RANGE_MASK_0 0x140510
+
+#define mmMME5_RTR_LBW_RANGE_MASK_1 0x140514
+
+#define mmMME5_RTR_LBW_RANGE_MASK_2 0x140518
+
+#define mmMME5_RTR_LBW_RANGE_MASK_3 0x14051C
+
+#define mmMME5_RTR_LBW_RANGE_MASK_4 0x140520
+
+#define mmMME5_RTR_LBW_RANGE_MASK_5 0x140524
+
+#define mmMME5_RTR_LBW_RANGE_MASK_6 0x140528
+
+#define mmMME5_RTR_LBW_RANGE_MASK_7 0x14052C
+
+#define mmMME5_RTR_LBW_RANGE_MASK_8 0x140530
+
+#define mmMME5_RTR_LBW_RANGE_MASK_9 0x140534
+
+#define mmMME5_RTR_LBW_RANGE_MASK_10 0x140538
+
+#define mmMME5_RTR_LBW_RANGE_MASK_11 0x14053C
+
+#define mmMME5_RTR_LBW_RANGE_MASK_12 0x140540
+
+#define mmMME5_RTR_LBW_RANGE_MASK_13 0x140544
+
+#define mmMME5_RTR_LBW_RANGE_MASK_14 0x140548
+
+#define mmMME5_RTR_LBW_RANGE_MASK_15 0x14054C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_0 0x140550
+
+#define mmMME5_RTR_LBW_RANGE_BASE_1 0x140554
+
+#define mmMME5_RTR_LBW_RANGE_BASE_2 0x140558
+
+#define mmMME5_RTR_LBW_RANGE_BASE_3 0x14055C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_4 0x140560
+
+#define mmMME5_RTR_LBW_RANGE_BASE_5 0x140564
+
+#define mmMME5_RTR_LBW_RANGE_BASE_6 0x140568
+
+#define mmMME5_RTR_LBW_RANGE_BASE_7 0x14056C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_8 0x140570
+
+#define mmMME5_RTR_LBW_RANGE_BASE_9 0x140574
+
+#define mmMME5_RTR_LBW_RANGE_BASE_10 0x140578
+
+#define mmMME5_RTR_LBW_RANGE_BASE_11 0x14057C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_12 0x140580
+
+#define mmMME5_RTR_LBW_RANGE_BASE_13 0x140584
+
+#define mmMME5_RTR_LBW_RANGE_BASE_14 0x140588
+
+#define mmMME5_RTR_LBW_RANGE_BASE_15 0x14058C
+
+#define mmMME5_RTR_RGLTR 0x140590
+
+#define mmMME5_RTR_RGLTR_WR_RESULT 0x140594
+
+#define mmMME5_RTR_RGLTR_RD_RESULT 0x140598
+
+#define mmMME5_RTR_SCRAMB_EN 0x140600
+
+#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604
+
+#endif /* ASIC_REG_MME5_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
new file mode 100644
index 000000000000..fcec68388278
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME6_RTR_REGS_H_
+#define ASIC_REG_MME6_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME6_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME6_RTR_HBW_RD_RQ_E_ARB 0x180100
+
+#define mmMME6_RTR_HBW_RD_RQ_W_ARB 0x180104
+
+#define mmMME6_RTR_HBW_RD_RQ_N_ARB 0x180108
+
+#define mmMME6_RTR_HBW_RD_RQ_S_ARB 0x18010C
+
+#define mmMME6_RTR_HBW_RD_RQ_L_ARB 0x180110
+
+#define mmMME6_RTR_HBW_E_ARB_MAX 0x180120
+
+#define mmMME6_RTR_HBW_W_ARB_MAX 0x180124
+
+#define mmMME6_RTR_HBW_N_ARB_MAX 0x180128
+
+#define mmMME6_RTR_HBW_S_ARB_MAX 0x18012C
+
+#define mmMME6_RTR_HBW_L_ARB_MAX 0x180130
+
+#define mmMME6_RTR_HBW_RD_RS_MAX_CREDIT 0x180140
+
+#define mmMME6_RTR_HBW_WR_RQ_MAX_CREDIT 0x180144
+
+#define mmMME6_RTR_HBW_RD_RQ_MAX_CREDIT 0x180148
+
+#define mmMME6_RTR_HBW_RD_RS_E_ARB 0x180150
+
+#define mmMME6_RTR_HBW_RD_RS_W_ARB 0x180154
+
+#define mmMME6_RTR_HBW_RD_RS_N_ARB 0x180158
+
+#define mmMME6_RTR_HBW_RD_RS_S_ARB 0x18015C
+
+#define mmMME6_RTR_HBW_RD_RS_L_ARB 0x180160
+
+#define mmMME6_RTR_HBW_WR_RQ_E_ARB 0x180170
+
+#define mmMME6_RTR_HBW_WR_RQ_W_ARB 0x180174
+
+#define mmMME6_RTR_HBW_WR_RQ_N_ARB 0x180178
+
+#define mmMME6_RTR_HBW_WR_RQ_S_ARB 0x18017C
+
+#define mmMME6_RTR_HBW_WR_RQ_L_ARB 0x180180
+
+#define mmMME6_RTR_HBW_WR_RS_E_ARB 0x180190
+
+#define mmMME6_RTR_HBW_WR_RS_W_ARB 0x180194
+
+#define mmMME6_RTR_HBW_WR_RS_N_ARB 0x180198
+
+#define mmMME6_RTR_HBW_WR_RS_S_ARB 0x18019C
+
+#define mmMME6_RTR_HBW_WR_RS_L_ARB 0x1801A0
+
+#define mmMME6_RTR_LBW_RD_RQ_E_ARB 0x180200
+
+#define mmMME6_RTR_LBW_RD_RQ_W_ARB 0x180204
+
+#define mmMME6_RTR_LBW_RD_RQ_N_ARB 0x180208
+
+#define mmMME6_RTR_LBW_RD_RQ_S_ARB 0x18020C
+
+#define mmMME6_RTR_LBW_RD_RQ_L_ARB 0x180210
+
+#define mmMME6_RTR_LBW_E_ARB_MAX 0x180220
+
+#define mmMME6_RTR_LBW_W_ARB_MAX 0x180224
+
+#define mmMME6_RTR_LBW_N_ARB_MAX 0x180228
+
+#define mmMME6_RTR_LBW_S_ARB_MAX 0x18022C
+
+#define mmMME6_RTR_LBW_L_ARB_MAX 0x180230
+
+#define mmMME6_RTR_LBW_SRAM_MAX_CREDIT 0x180240
+
+#define mmMME6_RTR_LBW_RD_RS_E_ARB 0x180250
+
+#define mmMME6_RTR_LBW_RD_RS_W_ARB 0x180254
+
+#define mmMME6_RTR_LBW_RD_RS_N_ARB 0x180258
+
+#define mmMME6_RTR_LBW_RD_RS_S_ARB 0x18025C
+
+#define mmMME6_RTR_LBW_RD_RS_L_ARB 0x180260
+
+#define mmMME6_RTR_LBW_WR_RQ_E_ARB 0x180270
+
+#define mmMME6_RTR_LBW_WR_RQ_W_ARB 0x180274
+
+#define mmMME6_RTR_LBW_WR_RQ_N_ARB 0x180278
+
+#define mmMME6_RTR_LBW_WR_RQ_S_ARB 0x18027C
+
+#define mmMME6_RTR_LBW_WR_RQ_L_ARB 0x180280
+
+#define mmMME6_RTR_LBW_WR_RS_E_ARB 0x180290
+
+#define mmMME6_RTR_LBW_WR_RS_W_ARB 0x180294
+
+#define mmMME6_RTR_LBW_WR_RS_N_ARB 0x180298
+
+#define mmMME6_RTR_LBW_WR_RS_S_ARB 0x18029C
+
+#define mmMME6_RTR_LBW_WR_RS_L_ARB 0x1802A0
+
+#define mmMME6_RTR_DBG_E_ARB 0x180300
+
+#define mmMME6_RTR_DBG_W_ARB 0x180304
+
+#define mmMME6_RTR_DBG_N_ARB 0x180308
+
+#define mmMME6_RTR_DBG_S_ARB 0x18030C
+
+#define mmMME6_RTR_DBG_L_ARB 0x180310
+
+#define mmMME6_RTR_DBG_E_ARB_MAX 0x180320
+
+#define mmMME6_RTR_DBG_W_ARB_MAX 0x180324
+
+#define mmMME6_RTR_DBG_N_ARB_MAX 0x180328
+
+#define mmMME6_RTR_DBG_S_ARB_MAX 0x18032C
+
+#define mmMME6_RTR_DBG_L_ARB_MAX 0x180330
+
+#define mmMME6_RTR_SPLIT_COEF_0 0x180400
+
+#define mmMME6_RTR_SPLIT_COEF_1 0x180404
+
+#define mmMME6_RTR_SPLIT_COEF_2 0x180408
+
+#define mmMME6_RTR_SPLIT_COEF_3 0x18040C
+
+#define mmMME6_RTR_SPLIT_COEF_4 0x180410
+
+#define mmMME6_RTR_SPLIT_COEF_5 0x180414
+
+#define mmMME6_RTR_SPLIT_COEF_6 0x180418
+
+#define mmMME6_RTR_SPLIT_COEF_7 0x18041C
+
+#define mmMME6_RTR_SPLIT_COEF_8 0x180420
+
+#define mmMME6_RTR_SPLIT_COEF_9 0x180424
+
+#define mmMME6_RTR_SPLIT_CFG 0x180440
+
+#define mmMME6_RTR_SPLIT_RD_SAT 0x180444
+
+#define mmMME6_RTR_SPLIT_RD_RST_TOKEN 0x180448
+
+#define mmMME6_RTR_SPLIT_RD_TIMEOUT_0 0x18044C
+
+#define mmMME6_RTR_SPLIT_RD_TIMEOUT_1 0x180450
+
+#define mmMME6_RTR_SPLIT_WR_SAT 0x180454
+
+#define mmMME6_RTR_WPLIT_WR_TST_TOLEN 0x180458
+
+#define mmMME6_RTR_SPLIT_WR_TIMEOUT_0 0x18045C
+
+#define mmMME6_RTR_SPLIT_WR_TIMEOUT_1 0x180460
+
+#define mmMME6_RTR_HBW_RANGE_HIT 0x180470
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_0 0x180480
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_1 0x180484
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_2 0x180488
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_3 0x18048C
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_4 0x180490
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_5 0x180494
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_6 0x180498
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_7 0x18049C
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_0 0x1804A0
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_1 0x1804A4
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_2 0x1804A8
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_3 0x1804AC
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_4 0x1804B0
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_5 0x1804B4
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_6 0x1804B8
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_7 0x1804BC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_0 0x1804C0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_1 0x1804C4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_2 0x1804C8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_3 0x1804CC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_4 0x1804D0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_5 0x1804D4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_6 0x1804D8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_7 0x1804DC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_0 0x1804E0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_1 0x1804E4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_2 0x1804E8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_3 0x1804EC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_4 0x1804F0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_5 0x1804F4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_6 0x1804F8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_7 0x1804FC
+
+#define mmMME6_RTR_LBW_RANGE_HIT 0x180500
+
+#define mmMME6_RTR_LBW_RANGE_MASK_0 0x180510
+
+#define mmMME6_RTR_LBW_RANGE_MASK_1 0x180514
+
+#define mmMME6_RTR_LBW_RANGE_MASK_2 0x180518
+
+#define mmMME6_RTR_LBW_RANGE_MASK_3 0x18051C
+
+#define mmMME6_RTR_LBW_RANGE_MASK_4 0x180520
+
+#define mmMME6_RTR_LBW_RANGE_MASK_5 0x180524
+
+#define mmMME6_RTR_LBW_RANGE_MASK_6 0x180528
+
+#define mmMME6_RTR_LBW_RANGE_MASK_7 0x18052C
+
+#define mmMME6_RTR_LBW_RANGE_MASK_8 0x180530
+
+#define mmMME6_RTR_LBW_RANGE_MASK_9 0x180534
+
+#define mmMME6_RTR_LBW_RANGE_MASK_10 0x180538
+
+#define mmMME6_RTR_LBW_RANGE_MASK_11 0x18053C
+
+#define mmMME6_RTR_LBW_RANGE_MASK_12 0x180540
+
+#define mmMME6_RTR_LBW_RANGE_MASK_13 0x180544
+
+#define mmMME6_RTR_LBW_RANGE_MASK_14 0x180548
+
+#define mmMME6_RTR_LBW_RANGE_MASK_15 0x18054C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_0 0x180550
+
+#define mmMME6_RTR_LBW_RANGE_BASE_1 0x180554
+
+#define mmMME6_RTR_LBW_RANGE_BASE_2 0x180558
+
+#define mmMME6_RTR_LBW_RANGE_BASE_3 0x18055C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_4 0x180560
+
+#define mmMME6_RTR_LBW_RANGE_BASE_5 0x180564
+
+#define mmMME6_RTR_LBW_RANGE_BASE_6 0x180568
+
+#define mmMME6_RTR_LBW_RANGE_BASE_7 0x18056C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_8 0x180570
+
+#define mmMME6_RTR_LBW_RANGE_BASE_9 0x180574
+
+#define mmMME6_RTR_LBW_RANGE_BASE_10 0x180578
+
+#define mmMME6_RTR_LBW_RANGE_BASE_11 0x18057C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_12 0x180580
+
+#define mmMME6_RTR_LBW_RANGE_BASE_13 0x180584
+
+#define mmMME6_RTR_LBW_RANGE_BASE_14 0x180588
+
+#define mmMME6_RTR_LBW_RANGE_BASE_15 0x18058C
+
+#define mmMME6_RTR_RGLTR 0x180590
+
+#define mmMME6_RTR_RGLTR_WR_RESULT 0x180594
+
+#define mmMME6_RTR_RGLTR_RD_RESULT 0x180598
+
+#define mmMME6_RTR_SCRAMB_EN 0x180600
+
+#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604
+
+#endif /* ASIC_REG_MME6_RTR_REGS_H_ */
+
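Editor's note (not part of the patch): the MME3..MME6 router (RTR) headers added above are identical in layout and differ only by base offset — 0xC0000, 0x100000, 0x140000 and 0x180000 respectively, i.e. a 0x40000 stride per instance. The register named mmMME*_RTR_WPLIT_WR_TST_TOLEN is left exactly as it appears in the auto-generated source; by analogy with SPLIT_RD_RST_TOKEN it presumably denotes the write-side reset-token register, but the identifier itself is not altered here. The sketch below only illustrates the stride relationship visible in these files; the helper name and the MME_RTR_* offset macros are hypothetical and not taken from the patch.

	/*
	 * Illustrative only, assuming the 0x40000 per-instance stride seen
	 * in the MME3..MME6 blocks above.
	 */
	#include <stdint.h>

	#define MME_RTR_INSTANCE_STRIDE  0x40000u   /* stride between MME3..MME6 blocks */
	#define MME3_RTR_BASE            0xC0000u   /* base of the MME3_RTR block */
	#define MME_RTR_SCRAMB_EN_OFFSET 0x600u     /* SCRAMB_EN lives at +0x600 in each block */

	/* Absolute config-space offset of a register for instance 3..6. */
	static inline uint32_t mme_rtr_reg(unsigned int instance, uint32_t reg_offset)
	{
		return MME3_RTR_BASE +
		       (instance - 3) * MME_RTR_INSTANCE_STRIDE + reg_offset;
	}

	/*
	 * Example: mme_rtr_reg(6, MME_RTR_SCRAMB_EN_OFFSET) == 0x180600,
	 * which matches mmMME6_RTR_SCRAMB_EN defined above.
	 */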
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
new file mode 100644
index 000000000000..a0d4382fbbd0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_CMDQ_MASKS_H_
+#define ASIC_REG_MME_CMDQ_MASKS_H_
+
+/*
+ *****************************************
+ * MME_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+/* MME_CMDQ_GLBL_CFG0 */
+#define MME_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0
+#define MME_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1
+#define MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1
+#define MME_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2
+#define MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2
+#define MME_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4
+#define MME_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3
+#define MME_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* MME_CMDQ_GLBL_CFG1 */
+#define MME_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define MME_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define MME_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2
+#define MME_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4
+#define MME_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define MME_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* MME_CMDQ_GLBL_PROT */
+#define MME_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0
+#define MME_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1
+#define MME_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1
+#define MME_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2
+#define MME_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2
+#define MME_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4
+#define MME_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3
+#define MME_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8
+#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* MME_CMDQ_GLBL_ERR_CFG */
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* MME_CMDQ_GLBL_ERR_ADDR_LO */
+#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_GLBL_ERR_ADDR_HI */
+#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_GLBL_ERR_WDATA */
+#define MME_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_GLBL_SECURE_PROPS */
+#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_CMDQ_GLBL_NON_SECURE_PROPS */
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_CMDQ_GLBL_STS0 */
+#define MME_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define MME_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define MME_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2
+#define MME_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4
+#define MME_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define MME_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* MME_CMDQ_GLBL_STS1 */
+#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* MME_CMDQ_CQ_CFG0 */
+#define MME_CMDQ_CQ_CFG0_RESERVED_SHIFT 0
+#define MME_CMDQ_CQ_CFG0_RESERVED_MASK 0x1
+
+/* MME_CMDQ_CQ_CFG1 */
+#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_ARUSER */
+#define MME_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define MME_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define MME_CMDQ_CQ_ARUSER_WORD_SHIFT 1
+#define MME_CMDQ_CQ_ARUSER_WORD_MASK 0x2
+
+/* MME_CMDQ_CQ_PTR_LO */
+#define MME_CMDQ_CQ_PTR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_PTR_HI */
+#define MME_CMDQ_CQ_PTR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_TSIZE */
+#define MME_CMDQ_CQ_TSIZE_VAL_SHIFT 0
+#define MME_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_CTL */
+#define MME_CMDQ_CQ_CTL_RPT_SHIFT 0
+#define MME_CMDQ_CQ_CTL_RPT_MASK 0xFFFF
+#define MME_CMDQ_CQ_CTL_CTL_SHIFT 16
+#define MME_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_PTR_LO_STS */
+#define MME_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_PTR_HI_STS */
+#define MME_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_TSIZE_STS */
+#define MME_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0
+#define MME_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_CTL_STS */
+#define MME_CMDQ_CQ_CTL_STS_RPT_SHIFT 0
+#define MME_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define MME_CMDQ_CQ_CTL_STS_CTL_SHIFT 16
+#define MME_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_STS0 */
+#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_STS1 */
+#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define MME_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31
+#define MME_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_EN */
+#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */
+#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_SAT */
+#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_TOUT */
+#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* MME_CMDQ_CQ_IFIFO_CNT */
+#define MME_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* MME_CMDQ_CP_MSG_BASE0_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE0_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE1_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE1_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE2_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE2_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE3_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE3_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_TSIZE_OFFSET */
+#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */
+#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */
+#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_COMMIT_OFFSET */
+#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_FENCE0_RDATA */
+#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE1_RDATA */
+#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE2_RDATA */
+#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE3_RDATA */
+#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE0_CNT */
+#define MME_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_FENCE1_CNT */
+#define MME_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_FENCE2_CNT */
+#define MME_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_FENCE3_CNT */
+#define MME_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_STS */
+#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_CMDQ_CP_STS_ERDY_SHIFT 16
+#define MME_CMDQ_CP_STS_ERDY_MASK 0x10000
+#define MME_CMDQ_CP_STS_RRDY_SHIFT 17
+#define MME_CMDQ_CP_STS_RRDY_MASK 0x20000
+#define MME_CMDQ_CP_STS_MRDY_SHIFT 18
+#define MME_CMDQ_CP_STS_MRDY_MASK 0x40000
+#define MME_CMDQ_CP_STS_SW_STOP_SHIFT 19
+#define MME_CMDQ_CP_STS_SW_STOP_MASK 0x80000
+#define MME_CMDQ_CP_STS_FENCE_ID_SHIFT 20
+#define MME_CMDQ_CP_STS_FENCE_ID_MASK 0x300000
+#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* MME_CMDQ_CP_CURRENT_INST_LO */
+#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_CURRENT_INST_HI */
+#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_BARRIER_CFG */
+#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* MME_CMDQ_CP_DBG_0 */
+#define MME_CMDQ_CP_DBG_0_VAL_SHIFT 0
+#define MME_CMDQ_CP_DBG_0_VAL_MASK 0xFF
+
+/* MME_CMDQ_CQ_BUF_ADDR */
+#define MME_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0
+#define MME_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_BUF_RDATA */
+#define MME_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0
+#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */
+
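Editor's note (not part of the patch): the *_SHIFT/*_MASK pairs in mme_cmdq_masks.h describe bit-fields within 32-bit registers. The sketch below shows one conventional way such pairs are consumed; only the macro names come from the header above, while the helper functions and the cfg0 value are hypothetical.

	#include <linux/types.h>

	/* Extract a field described by a (mask, shift) pair. */
	static inline u32 mme_field_get(u32 reg, u32 mask, u32 shift)
	{
		return (reg & mask) >> shift;
	}

	/* Replace a field described by a (mask, shift) pair. */
	static inline u32 mme_field_set(u32 reg, u32 mask, u32 shift, u32 val)
	{
		return (reg & ~mask) | ((val << shift) & mask);
	}

	/* Enable the PQF and CQF engines in a (hypothetical) GLBL_CFG0 value. */
	static inline u32 mme_cmdq_enable_pqf_cqf(u32 cfg0)
	{
		cfg0 = mme_field_set(cfg0, MME_CMDQ_GLBL_CFG0_PQF_EN_MASK,
				     MME_CMDQ_GLBL_CFG0_PQF_EN_SHIFT, 1);
		cfg0 = mme_field_set(cfg0, MME_CMDQ_GLBL_CFG0_CQF_EN_MASK,
				     MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT, 1);
		return cfg0;
	}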
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
new file mode 100644
index 000000000000..5c2f6b870a58
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_CMDQ_REGS_H_
+#define ASIC_REG_MME_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * MME_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmMME_CMDQ_GLBL_CFG0 0xD9000
+
+#define mmMME_CMDQ_GLBL_CFG1 0xD9004
+
+#define mmMME_CMDQ_GLBL_PROT 0xD9008
+
+#define mmMME_CMDQ_GLBL_ERR_CFG 0xD900C
+
+#define mmMME_CMDQ_GLBL_ERR_ADDR_LO 0xD9010
+
+#define mmMME_CMDQ_GLBL_ERR_ADDR_HI 0xD9014
+
+#define mmMME_CMDQ_GLBL_ERR_WDATA 0xD9018
+
+#define mmMME_CMDQ_GLBL_SECURE_PROPS 0xD901C
+
+#define mmMME_CMDQ_GLBL_NON_SECURE_PROPS 0xD9020
+
+#define mmMME_CMDQ_GLBL_STS0 0xD9024
+
+#define mmMME_CMDQ_GLBL_STS1 0xD9028
+
+#define mmMME_CMDQ_CQ_CFG0 0xD90B0
+
+#define mmMME_CMDQ_CQ_CFG1 0xD90B4
+
+#define mmMME_CMDQ_CQ_ARUSER 0xD90B8
+
+#define mmMME_CMDQ_CQ_PTR_LO 0xD90C0
+
+#define mmMME_CMDQ_CQ_PTR_HI 0xD90C4
+
+#define mmMME_CMDQ_CQ_TSIZE 0xD90C8
+
+#define mmMME_CMDQ_CQ_CTL 0xD90CC
+
+#define mmMME_CMDQ_CQ_PTR_LO_STS 0xD90D4
+
+#define mmMME_CMDQ_CQ_PTR_HI_STS 0xD90D8
+
+#define mmMME_CMDQ_CQ_TSIZE_STS 0xD90DC
+
+#define mmMME_CMDQ_CQ_CTL_STS 0xD90E0
+
+#define mmMME_CMDQ_CQ_STS0 0xD90E4
+
+#define mmMME_CMDQ_CQ_STS1 0xD90E8
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_EN 0xD90F0
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xD90F4
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_SAT 0xD90F8
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT 0xD90FC
+
+#define mmMME_CMDQ_CQ_IFIFO_CNT 0xD9108
+
+#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO 0xD9120
+
+#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI 0xD9124
+
+#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO 0xD9128
+
+#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI 0xD912C
+
+#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO 0xD9130
+
+#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI 0xD9134
+
+#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO 0xD9138
+
+#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI 0xD913C
+
+#define mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET 0xD9140
+
+#define mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xD9144
+
+#define mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xD9148
+
+#define mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xD914C
+
+#define mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xD9150
+
+#define mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET 0xD9154
+
+#define mmMME_CMDQ_CP_FENCE0_RDATA 0xD9158
+
+#define mmMME_CMDQ_CP_FENCE1_RDATA 0xD915C
+
+#define mmMME_CMDQ_CP_FENCE2_RDATA 0xD9160
+
+#define mmMME_CMDQ_CP_FENCE3_RDATA 0xD9164
+
+#define mmMME_CMDQ_CP_FENCE0_CNT 0xD9168
+
+#define mmMME_CMDQ_CP_FENCE1_CNT 0xD916C
+
+#define mmMME_CMDQ_CP_FENCE2_CNT 0xD9170
+
+#define mmMME_CMDQ_CP_FENCE3_CNT 0xD9174
+
+#define mmMME_CMDQ_CP_STS 0xD9178
+
+#define mmMME_CMDQ_CP_CURRENT_INST_LO 0xD917C
+
+#define mmMME_CMDQ_CP_CURRENT_INST_HI 0xD9180
+
+#define mmMME_CMDQ_CP_BARRIER_CFG 0xD9184
+
+#define mmMME_CMDQ_CP_DBG_0 0xD9188
+
+#define mmMME_CMDQ_CQ_BUF_ADDR 0xD9308
+
+#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C
+
+#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */
+
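Editor's note (not part of the patch): the mm* constants in mme_cmdq_regs.h are byte offsets into the device's register space. Assuming an ioremap()'d base pointer (the struct and its `regs` field below are hypothetical), a status read combining an offset from this header with a mask from mme_cmdq_masks.h might look like the following; the driver itself wraps register access in its own helpers elsewhere in the series, so this is only an illustration of how the offsets are consumed.

	#include <linux/io.h>
	#include <linux/types.h>

	struct mme_cmdq_dev {
		void __iomem *regs;	/* hypothetical mapped register base */
	};

	/* Return true if the command queue's PQF engine reports idle. */
	static bool mme_cmdq_pqf_idle(struct mme_cmdq_dev *dev)
	{
		u32 sts0 = readl(dev->regs + mmMME_CMDQ_GLBL_STS0);

		return sts0 & MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK;
	}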
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
new file mode 100644
index 000000000000..c7b1b0bb3384
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
@@ -0,0 +1,1537 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_MASKS_H_
+#define ASIC_REG_MME_MASKS_H_
+
+/*
+ *****************************************
+ * MME (Prototype: MME)
+ *****************************************
+ */
+
+/* MME_ARCH_STATUS */
+#define MME_ARCH_STATUS_A_SHIFT 0
+#define MME_ARCH_STATUS_A_MASK 0x1
+#define MME_ARCH_STATUS_B_SHIFT 1
+#define MME_ARCH_STATUS_B_MASK 0x2
+#define MME_ARCH_STATUS_CIN_SHIFT 2
+#define MME_ARCH_STATUS_CIN_MASK 0x4
+#define MME_ARCH_STATUS_COUT_SHIFT 3
+#define MME_ARCH_STATUS_COUT_MASK 0x8
+#define MME_ARCH_STATUS_TE_SHIFT 4
+#define MME_ARCH_STATUS_TE_MASK 0x10
+#define MME_ARCH_STATUS_LD_SHIFT 5
+#define MME_ARCH_STATUS_LD_MASK 0x20
+#define MME_ARCH_STATUS_ST_SHIFT 6
+#define MME_ARCH_STATUS_ST_MASK 0x40
+#define MME_ARCH_STATUS_SB_A_EMPTY_SHIFT 7
+#define MME_ARCH_STATUS_SB_A_EMPTY_MASK 0x80
+#define MME_ARCH_STATUS_SB_B_EMPTY_SHIFT 8
+#define MME_ARCH_STATUS_SB_B_EMPTY_MASK 0x100
+#define MME_ARCH_STATUS_SB_CIN_EMPTY_SHIFT 9
+#define MME_ARCH_STATUS_SB_CIN_EMPTY_MASK 0x200
+#define MME_ARCH_STATUS_SB_COUT_EMPTY_SHIFT 10
+#define MME_ARCH_STATUS_SB_COUT_EMPTY_MASK 0x400
+#define MME_ARCH_STATUS_SM_IDLE_SHIFT 11
+#define MME_ARCH_STATUS_SM_IDLE_MASK 0x800
+#define MME_ARCH_STATUS_WBC_AXI_IDLE_SHIFT 12
+#define MME_ARCH_STATUS_WBC_AXI_IDLE_MASK 0xF000
+#define MME_ARCH_STATUS_SBC_AXI_IDLE_SHIFT 16
+#define MME_ARCH_STATUS_SBC_AXI_IDLE_MASK 0x30000
+#define MME_ARCH_STATUS_SBB_AXI_IDLE_SHIFT 18
+#define MME_ARCH_STATUS_SBB_AXI_IDLE_MASK 0xC0000
+#define MME_ARCH_STATUS_SBA_AXI_IDLE_SHIFT 20
+#define MME_ARCH_STATUS_SBA_AXI_IDLE_MASK 0x300000
+#define MME_ARCH_STATUS_FREE_ACCUMS_SHIFT 22
+#define MME_ARCH_STATUS_FREE_ACCUMS_MASK 0x1C00000
+
+/* MME_ARCH_A_BASE_ADDR_HIGH */
+#define MME_ARCH_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_BASE_ADDR_HIGH */
+#define MME_ARCH_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_CIN_BASE_ADDR_HIGH */
+#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_COUT_BASE_ADDR_HIGH */
+#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_BIAS_BASE_ADDR_HIGH */
+#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_BASE_ADDR_LOW */
+#define MME_ARCH_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_BASE_ADDR_LOW */
+#define MME_ARCH_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_CIN_BASE_ADDR_LOW */
+#define MME_ARCH_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_COUT_BASE_ADDR_LOW */
+#define MME_ARCH_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_BIAS_BASE_ADDR_LOW */
+#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_HEADER */
+#define MME_ARCH_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_ARCH_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_ARCH_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_ARCH_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_ARCH_HEADER_TRANS_A_SHIFT 6
+#define MME_ARCH_HEADER_TRANS_A_MASK 0x40
+#define MME_ARCH_HEADER_LOWER_A_SHIFT 7
+#define MME_ARCH_HEADER_LOWER_A_MASK 0x80
+#define MME_ARCH_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_ARCH_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_ARCH_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_ARCH_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_ARCH_HEADER_LOAD_CIN_SHIFT 13
+#define MME_ARCH_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_ARCH_HEADER_STORE_OUT_SHIFT 15
+#define MME_ARCH_HEADER_STORE_OUT_MASK 0x8000
+#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_ARCH_HEADER_ADVANCE_A_SHIFT 17
+#define MME_ARCH_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_ARCH_HEADER_ADVANCE_B_SHIFT 18
+#define MME_ARCH_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_ARCH_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_ARCH_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_ARCH_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_ARCH_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_ARCH_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_ARCH_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_ARCH_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_ARCH_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_ARCH_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_ARCH_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_ARCH_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_ARCH_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_ARCH_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_ARCH_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_ARCH_KERNEL_SIZE_MINUS_1 */
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_ARCH_ASSOCIATED_DIMS */
+#define MME_ARCH_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_ARCH_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_ARCH_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_ARCH_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_ARCH_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_ARCH_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_ARCH_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_ARCH_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_ARCH_COUT_SCALE */
+#define MME_ARCH_COUT_SCALE_V_SHIFT 0
+#define MME_ARCH_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_CIN_SCALE */
+#define MME_ARCH_CIN_SCALE_V_SHIFT 0
+#define MME_ARCH_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_GEMMLOWP_ZP */
+#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_ARCH_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_ARCH_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_ARCH_GEMMLOWP_EXPONENT */
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_ARCH_A_ROI_BASE_OFFSET */
+#define MME_ARCH_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_ARCH_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_VALID_ELEMENTS */
+#define MME_ARCH_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_ARCH_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_LOOP_STRIDE */
+#define MME_ARCH_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_ARCH_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_ROI_SIZE */
+#define MME_ARCH_A_ROI_SIZE_V_SHIFT 0
+#define MME_ARCH_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_SPATIAL_START_OFFSET */
+#define MME_ARCH_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_ARCH_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_SPATIAL_STRIDE */
+#define MME_ARCH_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_ARCH_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_ROI_BASE_OFFSET */
+#define MME_ARCH_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_ARCH_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_VALID_ELEMENTS */
+#define MME_ARCH_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_ARCH_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_LOOP_STRIDE */
+#define MME_ARCH_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_ARCH_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_ROI_SIZE */
+#define MME_ARCH_B_ROI_SIZE_V_SHIFT 0
+#define MME_ARCH_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_SPATIAL_START_OFFSET */
+#define MME_ARCH_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_ARCH_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_SPATIAL_STRIDE */
+#define MME_ARCH_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_ARCH_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_ROI_BASE_OFFSET */
+#define MME_ARCH_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_ARCH_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_VALID_ELEMENTS */
+#define MME_ARCH_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_ARCH_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_LOOP_STRIDE */
+#define MME_ARCH_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_ARCH_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_ROI_SIZE */
+#define MME_ARCH_C_ROI_SIZE_V_SHIFT 0
+#define MME_ARCH_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_SPATIAL_START_OFFSET */
+#define MME_ARCH_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_ARCH_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_SPATIAL_STRIDE */
+#define MME_ARCH_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_ARCH_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_SYNC_OBJECT_MESSAGE */
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_ARCH_E_PADDING_VALUE_A */
+#define MME_ARCH_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_ARCH_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_ARCH_E_NUM_ITERATION_MINUS_1 */
+#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_E_BUBBLES_PER_SPLIT */
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_CMD */
+#define MME_CMD_EXECUTE_SHIFT 0
+#define MME_CMD_EXECUTE_MASK 0x1
+
+/* MME_DUMMY */
+#define MME_DUMMY_V_SHIFT 0
+#define MME_DUMMY_V_MASK 0xFFFFFFFF
+
+/* MME_RESET */
+#define MME_RESET_V_SHIFT 0
+#define MME_RESET_V_MASK 0x1
+
+/* MME_STALL */
+#define MME_STALL_V_SHIFT 0
+#define MME_STALL_V_MASK 0xFFFFFFFF
+
+/* MME_SM_BASE_ADDRESS_LOW */
+#define MME_SM_BASE_ADDRESS_LOW_V_SHIFT 0
+#define MME_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SM_BASE_ADDRESS_HIGH */
+#define MME_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define MME_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_ADD */
+#define MME_DBGMEM_ADD_V_SHIFT 0
+#define MME_DBGMEM_ADD_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_DATA_WR */
+#define MME_DBGMEM_DATA_WR_V_SHIFT 0
+#define MME_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_DATA_RD */
+#define MME_DBGMEM_DATA_RD_V_SHIFT 0
+#define MME_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_CTRL */
+#define MME_DBGMEM_CTRL_WR_NRD_SHIFT 0
+#define MME_DBGMEM_CTRL_WR_NRD_MASK 0x1
+
+/* MME_DBGMEM_RC */
+#define MME_DBGMEM_RC_VALID_SHIFT 0
+#define MME_DBGMEM_RC_VALID_MASK 0x1
+#define MME_DBGMEM_RC_FULL_SHIFT 1
+#define MME_DBGMEM_RC_FULL_MASK 0x2
+
+/* MME_LOG_SHADOW */
+#define MME_LOG_SHADOW_MASK_0_SHIFT 0
+#define MME_LOG_SHADOW_MASK_0_MASK 0x7F
+#define MME_LOG_SHADOW_MASK_1_SHIFT 8
+#define MME_LOG_SHADOW_MASK_1_MASK 0x7F00
+#define MME_LOG_SHADOW_MASK_2_SHIFT 16
+#define MME_LOG_SHADOW_MASK_2_MASK 0x7F0000
+#define MME_LOG_SHADOW_MASK_3_SHIFT 24
+#define MME_LOG_SHADOW_MASK_3_MASK 0x7F000000
+
+/* MME_STORE_MAX_CREDIT */
+#define MME_STORE_MAX_CREDIT_V_SHIFT 0
+#define MME_STORE_MAX_CREDIT_V_MASK 0x3F
+
+/* MME_AGU */
+#define MME_AGU_SBA_MAX_CREDIT_SHIFT 0
+#define MME_AGU_SBA_MAX_CREDIT_MASK 0x1F
+#define MME_AGU_SBB_MAX_CREDIT_SHIFT 8
+#define MME_AGU_SBB_MAX_CREDIT_MASK 0x1F00
+#define MME_AGU_SBC_MAX_CREDIT_SHIFT 16
+#define MME_AGU_SBC_MAX_CREDIT_MASK 0x1F0000
+#define MME_AGU_WBC_MAX_CREDIT_SHIFT 24
+#define MME_AGU_WBC_MAX_CREDIT_MASK 0x3F000000
+
+/* MME_SBA */
+#define MME_SBA_MAX_SIZE_SHIFT 0
+#define MME_SBA_MAX_SIZE_MASK 0x3FF
+#define MME_SBA_EU_MAX_CREDIT_SHIFT 16
+#define MME_SBA_EU_MAX_CREDIT_MASK 0x1F0000
+
+/* MME_SBB */
+#define MME_SBB_MAX_SIZE_SHIFT 0
+#define MME_SBB_MAX_SIZE_MASK 0x3FF
+#define MME_SBB_EU_MAX_CREDIT_SHIFT 16
+#define MME_SBB_EU_MAX_CREDIT_MASK 0x1F0000
+
+/* MME_SBC */
+#define MME_SBC_MAX_SIZE_SHIFT 0
+#define MME_SBC_MAX_SIZE_MASK 0x3FF
+#define MME_SBC_EU_MAX_CREDIT_SHIFT 16
+#define MME_SBC_EU_MAX_CREDIT_MASK 0x1F0000
+
+/* MME_WBC */
+#define MME_WBC_MAX_OUTSTANDING_SHIFT 0
+#define MME_WBC_MAX_OUTSTANDING_MASK 0xFFF
+#define MME_WBC_DISABLE_FAST_END_PE_SHIFT 12
+#define MME_WBC_DISABLE_FAST_END_PE_MASK 0x1000
+#define MME_WBC_LD_INSERT_BUBBLE_DIS_SHIFT 13
+#define MME_WBC_LD_INSERT_BUBBLE_DIS_MASK 0x2000
+
+/* MME_SBA_CONTROL_DATA */
+#define MME_SBA_CONTROL_DATA_ASID_SHIFT 0
+#define MME_SBA_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_SBA_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_SBA_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_SBB_CONTROL_DATA */
+#define MME_SBB_CONTROL_DATA_ASID_SHIFT 0
+#define MME_SBB_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_SBB_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_SBB_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_SBC_CONTROL_DATA */
+#define MME_SBC_CONTROL_DATA_ASID_SHIFT 0
+#define MME_SBC_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_SBC_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_SBC_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_WBC_CONTROL_DATA */
+#define MME_WBC_CONTROL_DATA_ASID_SHIFT 0
+#define MME_WBC_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_WBC_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_WBC_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_TE */
+#define MME_TE_MAX_CREDIT_SHIFT 0
+#define MME_TE_MAX_CREDIT_MASK 0x1F
+#define MME_TE_DESC_MAX_CREDIT_SHIFT 8
+#define MME_TE_DESC_MAX_CREDIT_MASK 0x1F00
+
+/* MME_TE2DEC */
+#define MME_TE2DEC_MAX_CREDIT_SHIFT 0
+#define MME_TE2DEC_MAX_CREDIT_MASK 0x1F
+
+/* MME_REI_STATUS */
+#define MME_REI_STATUS_V_SHIFT 0
+#define MME_REI_STATUS_V_MASK 0xFFFFFFFF
+
+/* MME_REI_MASK */
+#define MME_REI_MASK_V_SHIFT 0
+#define MME_REI_MASK_V_MASK 0xFFFFFFFF
+
+/* MME_SEI_STATUS */
+#define MME_SEI_STATUS_V_SHIFT 0
+#define MME_SEI_STATUS_V_MASK 0xFFFFFFFF
+
+/* MME_SEI_MASK */
+#define MME_SEI_MASK_V_SHIFT 0
+#define MME_SEI_MASK_V_MASK 0xFFFFFFFF
+
+/* MME_SPI_STATUS */
+#define MME_SPI_STATUS_V_SHIFT 0
+#define MME_SPI_STATUS_V_MASK 0xFFFFFFFF
+
+/* MME_SPI_MASK */
+#define MME_SPI_MASK_V_SHIFT 0
+#define MME_SPI_MASK_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_STATUS */
+#define MME_SHADOW_0_STATUS_A_SHIFT 0
+#define MME_SHADOW_0_STATUS_A_MASK 0x1
+#define MME_SHADOW_0_STATUS_B_SHIFT 1
+#define MME_SHADOW_0_STATUS_B_MASK 0x2
+#define MME_SHADOW_0_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_0_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_0_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_0_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_0_STATUS_TE_SHIFT 4
+#define MME_SHADOW_0_STATUS_TE_MASK 0x10
+#define MME_SHADOW_0_STATUS_LD_SHIFT 5
+#define MME_SHADOW_0_STATUS_LD_MASK 0x20
+#define MME_SHADOW_0_STATUS_ST_SHIFT 6
+#define MME_SHADOW_0_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_0_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_BASE_ADDR_LOW */
+#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_BASE_ADDR_LOW */
+#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_HEADER */
+#define MME_SHADOW_0_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_0_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_0_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_0_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_0_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_0_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_0_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_0_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_0_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_0_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_0_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_0_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_0_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_0_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_0_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_0_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_0_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_0_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_0_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_0_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_0_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_0_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_0_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_0_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_0_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_0_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_0_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_0_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_0_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_0_ASSOCIATED_DIMS */
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_0_COUT_SCALE */
+#define MME_SHADOW_0_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_0_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_CIN_SCALE */
+#define MME_SHADOW_0_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_0_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_GEMMLOWP_ZP */
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_0_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_0_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_VALID_ELEMENTS */
+#define MME_SHADOW_0_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_0_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_LOOP_STRIDE */
+#define MME_SHADOW_0_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_ROI_SIZE */
+#define MME_SHADOW_0_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_0_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_SPATIAL_STRIDE */
+#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_VALID_ELEMENTS */
+#define MME_SHADOW_0_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_0_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_LOOP_STRIDE */
+#define MME_SHADOW_0_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_ROI_SIZE */
+#define MME_SHADOW_0_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_0_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_SPATIAL_STRIDE */
+#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_VALID_ELEMENTS */
+#define MME_SHADOW_0_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_0_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_LOOP_STRIDE */
+#define MME_SHADOW_0_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_ROI_SIZE */
+#define MME_SHADOW_0_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_0_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_SPATIAL_STRIDE */
+#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_0_E_PADDING_VALUE_A */
+#define MME_SHADOW_0_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_0_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_0_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_SHADOW_1_STATUS */
+#define MME_SHADOW_1_STATUS_A_SHIFT 0
+#define MME_SHADOW_1_STATUS_A_MASK 0x1
+#define MME_SHADOW_1_STATUS_B_SHIFT 1
+#define MME_SHADOW_1_STATUS_B_MASK 0x2
+#define MME_SHADOW_1_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_1_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_1_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_1_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_1_STATUS_TE_SHIFT 4
+#define MME_SHADOW_1_STATUS_TE_MASK 0x10
+#define MME_SHADOW_1_STATUS_LD_SHIFT 5
+#define MME_SHADOW_1_STATUS_LD_MASK 0x20
+#define MME_SHADOW_1_STATUS_ST_SHIFT 6
+#define MME_SHADOW_1_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_1_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_BASE_ADDR_LOW */
+#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_BASE_ADDR_LOW */
+#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_HEADER */
+#define MME_SHADOW_1_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_1_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_1_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_1_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_1_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_1_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_1_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_1_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_1_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_1_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_1_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_1_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_1_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_1_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_1_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_1_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_1_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_1_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_1_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_1_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_1_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_1_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_1_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_1_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_1_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_1_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_1_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_1_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_1_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_1_ASSOCIATED_DIMS */
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_1_COUT_SCALE */
+#define MME_SHADOW_1_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_1_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_CIN_SCALE */
+#define MME_SHADOW_1_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_1_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_GEMMLOWP_ZP */
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_1_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_1_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_VALID_ELEMENTS */
+#define MME_SHADOW_1_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_1_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_LOOP_STRIDE */
+#define MME_SHADOW_1_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_ROI_SIZE */
+#define MME_SHADOW_1_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_1_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_SPATIAL_STRIDE */
+#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_VALID_ELEMENTS */
+#define MME_SHADOW_1_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_1_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_LOOP_STRIDE */
+#define MME_SHADOW_1_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_ROI_SIZE */
+#define MME_SHADOW_1_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_1_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_SPATIAL_STRIDE */
+#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_VALID_ELEMENTS */
+#define MME_SHADOW_1_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_1_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_LOOP_STRIDE */
+#define MME_SHADOW_1_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_ROI_SIZE */
+#define MME_SHADOW_1_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_1_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_SPATIAL_STRIDE */
+#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_1_E_PADDING_VALUE_A */
+#define MME_SHADOW_1_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_1_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_1_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_SHADOW_2_STATUS */
+#define MME_SHADOW_2_STATUS_A_SHIFT 0
+#define MME_SHADOW_2_STATUS_A_MASK 0x1
+#define MME_SHADOW_2_STATUS_B_SHIFT 1
+#define MME_SHADOW_2_STATUS_B_MASK 0x2
+#define MME_SHADOW_2_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_2_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_2_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_2_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_2_STATUS_TE_SHIFT 4
+#define MME_SHADOW_2_STATUS_TE_MASK 0x10
+#define MME_SHADOW_2_STATUS_LD_SHIFT 5
+#define MME_SHADOW_2_STATUS_LD_MASK 0x20
+#define MME_SHADOW_2_STATUS_ST_SHIFT 6
+#define MME_SHADOW_2_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_2_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_BASE_ADDR_LOW */
+#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_BASE_ADDR_LOW */
+#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_HEADER */
+#define MME_SHADOW_2_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_2_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_2_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_2_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_2_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_2_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_2_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_2_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_2_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_2_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_2_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_2_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_2_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_2_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_2_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_2_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_2_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_2_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_2_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_2_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_2_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_2_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_2_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_2_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_2_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_2_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_2_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_2_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_2_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_2_ASSOCIATED_DIMS */
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_2_COUT_SCALE */
+#define MME_SHADOW_2_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_2_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_CIN_SCALE */
+#define MME_SHADOW_2_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_2_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_GEMMLOWP_ZP */
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_2_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_2_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_VALID_ELEMENTS */
+#define MME_SHADOW_2_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_2_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_LOOP_STRIDE */
+#define MME_SHADOW_2_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_ROI_SIZE */
+#define MME_SHADOW_2_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_2_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_SPATIAL_STRIDE */
+#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_VALID_ELEMENTS */
+#define MME_SHADOW_2_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_2_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_LOOP_STRIDE */
+#define MME_SHADOW_2_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_ROI_SIZE */
+#define MME_SHADOW_2_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_2_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_SPATIAL_STRIDE */
+#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_VALID_ELEMENTS */
+#define MME_SHADOW_2_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_2_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_LOOP_STRIDE */
+#define MME_SHADOW_2_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_ROI_SIZE */
+#define MME_SHADOW_2_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_2_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_SPATIAL_STRIDE */
+#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_2_E_PADDING_VALUE_A */
+#define MME_SHADOW_2_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_2_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_2_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_SHADOW_3_STATUS */
+#define MME_SHADOW_3_STATUS_A_SHIFT 0
+#define MME_SHADOW_3_STATUS_A_MASK 0x1
+#define MME_SHADOW_3_STATUS_B_SHIFT 1
+#define MME_SHADOW_3_STATUS_B_MASK 0x2
+#define MME_SHADOW_3_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_3_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_3_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_3_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_3_STATUS_TE_SHIFT 4
+#define MME_SHADOW_3_STATUS_TE_MASK 0x10
+#define MME_SHADOW_3_STATUS_LD_SHIFT 5
+#define MME_SHADOW_3_STATUS_LD_MASK 0x20
+#define MME_SHADOW_3_STATUS_ST_SHIFT 6
+#define MME_SHADOW_3_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_3_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_BASE_ADDR_LOW */
+#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_BASE_ADDR_LOW */
+#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_HEADER */
+#define MME_SHADOW_3_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_3_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_3_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_3_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_3_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_3_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_3_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_3_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_3_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_3_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_3_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_3_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_3_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_3_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_3_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_3_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_3_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_3_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_3_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_3_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_3_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_3_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_3_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_3_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_3_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_3_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_3_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_3_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_3_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_3_ASSOCIATED_DIMS */
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_3_COUT_SCALE */
+#define MME_SHADOW_3_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_3_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_CIN_SCALE */
+#define MME_SHADOW_3_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_3_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_GEMMLOWP_ZP */
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_3_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_3_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_VALID_ELEMENTS */
+#define MME_SHADOW_3_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_3_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_LOOP_STRIDE */
+#define MME_SHADOW_3_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_ROI_SIZE */
+#define MME_SHADOW_3_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_3_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_SPATIAL_STRIDE */
+#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_VALID_ELEMENTS */
+#define MME_SHADOW_3_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_3_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_LOOP_STRIDE */
+#define MME_SHADOW_3_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_ROI_SIZE */
+#define MME_SHADOW_3_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_3_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_SPATIAL_STRIDE */
+#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_VALID_ELEMENTS */
+#define MME_SHADOW_3_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_3_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_LOOP_STRIDE */
+#define MME_SHADOW_3_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_ROI_SIZE */
+#define MME_SHADOW_3_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_3_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_SPATIAL_STRIDE */
+#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_3_E_PADDING_VALUE_A */
+#define MME_SHADOW_3_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_3_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_3_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+#endif /* ASIC_REG_MME_MASKS_H_ */
+
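[Editor's illustrative sketch, not part of the patch above: the auto-generated *_SHIFT/*_MASK pairs in these headers follow the usual pattern of masking a register value and shifting the field into place. The snippet below shows that pattern using the MME_AGU credit fields defined earlier in mme_masks.h; the field_get()/field_set() helpers and the standalone main() are hypothetical scaffolding for demonstration only and are not claimed to be the driver's own accessors.]

    /* Sketch: packing/unpacking fields with the SHIFT/MASK pairs above. */
    #include <stdint.h>
    #include <stdio.h>

    /* Values copied verbatim from the MME_AGU definitions in mme_masks.h */
    #define MME_AGU_SBA_MAX_CREDIT_SHIFT 0
    #define MME_AGU_SBA_MAX_CREDIT_MASK  0x1F
    #define MME_AGU_SBB_MAX_CREDIT_SHIFT 8
    #define MME_AGU_SBB_MAX_CREDIT_MASK  0x1F00

    /* Extract a field: mask first, then shift down. */
    static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
    {
    	return (reg & mask) >> shift;
    }

    /* Insert a field: clear its bits, then OR in the shifted value. */
    static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
    				 uint32_t val)
    {
    	return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
    	uint32_t agu = 0;

    	agu = field_set(agu, MME_AGU_SBA_MAX_CREDIT_MASK,
    			MME_AGU_SBA_MAX_CREDIT_SHIFT, 0x10);
    	agu = field_set(agu, MME_AGU_SBB_MAX_CREDIT_MASK,
    			MME_AGU_SBB_MAX_CREDIT_SHIFT, 0x08);

    	printf("agu = 0x%08x, sba = %u, sbb = %u\n", agu,
    	       field_get(agu, MME_AGU_SBA_MAX_CREDIT_MASK,
    			 MME_AGU_SBA_MAX_CREDIT_SHIFT),
    	       field_get(agu, MME_AGU_SBB_MAX_CREDIT_MASK,
    			 MME_AGU_SBB_MAX_CREDIT_SHIFT));
    	return 0;
    }

[End of editor's sketch; the patch continues with the next generated header.]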
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
new file mode 100644
index 000000000000..d4bfa58dce19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_QM_MASKS_H_
+#define ASIC_REG_MME_QM_MASKS_H_
+
+/*
+ *****************************************
+ * MME_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* MME_QM_GLBL_CFG0 */
+#define MME_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define MME_QM_GLBL_CFG0_PQF_EN_MASK 0x1
+#define MME_QM_GLBL_CFG0_CQF_EN_SHIFT 1
+#define MME_QM_GLBL_CFG0_CQF_EN_MASK 0x2
+#define MME_QM_GLBL_CFG0_CP_EN_SHIFT 2
+#define MME_QM_GLBL_CFG0_CP_EN_MASK 0x4
+#define MME_QM_GLBL_CFG0_DMA_EN_SHIFT 3
+#define MME_QM_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* MME_QM_GLBL_CFG1 */
+#define MME_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define MME_QM_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define MME_QM_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define MME_QM_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define MME_QM_GLBL_CFG1_CP_STOP_SHIFT 2
+#define MME_QM_GLBL_CFG1_CP_STOP_MASK 0x4
+#define MME_QM_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define MME_QM_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define MME_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define MME_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define MME_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define MME_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define MME_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define MME_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define MME_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define MME_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* MME_QM_GLBL_PROT */
+#define MME_QM_GLBL_PROT_PQF_PROT_SHIFT 0
+#define MME_QM_GLBL_PROT_PQF_PROT_MASK 0x1
+#define MME_QM_GLBL_PROT_CQF_PROT_SHIFT 1
+#define MME_QM_GLBL_PROT_CQF_PROT_MASK 0x2
+#define MME_QM_GLBL_PROT_CP_PROT_SHIFT 2
+#define MME_QM_GLBL_PROT_CP_PROT_MASK 0x4
+#define MME_QM_GLBL_PROT_DMA_PROT_SHIFT 3
+#define MME_QM_GLBL_PROT_DMA_PROT_MASK 0x8
+#define MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define MME_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define MME_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define MME_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define MME_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* MME_QM_GLBL_ERR_CFG */
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* MME_QM_GLBL_ERR_ADDR_LO */
+#define MME_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_GLBL_ERR_ADDR_HI */
+#define MME_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_GLBL_ERR_WDATA */
+#define MME_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define MME_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_GLBL_SECURE_PROPS */
+#define MME_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define MME_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_QM_GLBL_NON_SECURE_PROPS */
+#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_QM_GLBL_STS0 */
+#define MME_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define MME_QM_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define MME_QM_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define MME_QM_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define MME_QM_GLBL_STS0_CP_IDLE_SHIFT 2
+#define MME_QM_GLBL_STS0_CP_IDLE_MASK 0x4
+#define MME_QM_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define MME_QM_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define MME_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define MME_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define MME_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define MME_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define MME_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define MME_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define MME_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define MME_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* MME_QM_GLBL_STS1 */
+#define MME_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define MME_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define MME_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define MME_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define MME_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define MME_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define MME_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define MME_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define MME_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define MME_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define MME_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* MME_QM_PQ_BASE_LO */
+#define MME_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define MME_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_BASE_HI */
+#define MME_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define MME_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_SIZE */
+#define MME_QM_PQ_SIZE_VAL_SHIFT 0
+#define MME_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PI */
+#define MME_QM_PQ_PI_VAL_SHIFT 0
+#define MME_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_CI */
+#define MME_QM_PQ_CI_VAL_SHIFT 0
+#define MME_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_CFG0 */
+#define MME_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define MME_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* MME_QM_PQ_CFG1 */
+#define MME_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME_QM_PQ_ARUSER */
+#define MME_QM_PQ_ARUSER_NOSNOOP_SHIFT 0
+#define MME_QM_PQ_ARUSER_NOSNOOP_MASK 0x1
+#define MME_QM_PQ_ARUSER_WORD_SHIFT 1
+#define MME_QM_PQ_ARUSER_WORD_MASK 0x2
+
+/* MME_QM_PQ_PUSH0 */
+#define MME_QM_PQ_PUSH0_PTR_LO_SHIFT 0
+#define MME_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PUSH1 */
+#define MME_QM_PQ_PUSH1_PTR_HI_SHIFT 0
+#define MME_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PUSH2 */
+#define MME_QM_PQ_PUSH2_TSIZE_SHIFT 0
+#define MME_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PUSH3 */
+#define MME_QM_PQ_PUSH3_RPT_SHIFT 0
+#define MME_QM_PQ_PUSH3_RPT_MASK 0xFFFF
+#define MME_QM_PQ_PUSH3_CTL_SHIFT 16
+#define MME_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000
+
+/* MME_QM_PQ_STS0 */
+#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define MME_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define MME_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME_QM_PQ_STS1 */
+#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define MME_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define MME_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* MME_QM_PQ_RD_RATE_LIM_EN */
+#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* MME_QM_PQ_RD_RATE_LIM_RST_TOKEN */
+#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME_QM_PQ_RD_RATE_LIM_SAT */
+#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* MME_QM_PQ_RD_RATE_LIM_TOUT */
+#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* MME_QM_CQ_CFG0 */
+#define MME_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define MME_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* MME_QM_CQ_CFG1 */
+#define MME_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME_QM_CQ_ARUSER */
+#define MME_QM_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define MME_QM_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define MME_QM_CQ_ARUSER_WORD_SHIFT 1
+#define MME_QM_CQ_ARUSER_WORD_MASK 0x2
+
+/* MME_QM_CQ_PTR_LO */
+#define MME_QM_CQ_PTR_LO_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_PTR_HI */
+#define MME_QM_CQ_PTR_HI_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_TSIZE */
+#define MME_QM_CQ_TSIZE_VAL_SHIFT 0
+#define MME_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_CTL */
+#define MME_QM_CQ_CTL_RPT_SHIFT 0
+#define MME_QM_CQ_CTL_RPT_MASK 0xFFFF
+#define MME_QM_CQ_CTL_CTL_SHIFT 16
+#define MME_QM_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* MME_QM_CQ_PTR_LO_STS */
+#define MME_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_PTR_HI_STS */
+#define MME_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_TSIZE_STS */
+#define MME_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define MME_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_CTL_STS */
+#define MME_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define MME_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define MME_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define MME_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* MME_QM_CQ_STS0 */
+#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define MME_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define MME_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME_QM_CQ_STS1 */
+#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define MME_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define MME_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* MME_QM_CQ_RD_RATE_LIM_EN */
+#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* MME_QM_CQ_RD_RATE_LIM_RST_TOKEN */
+#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME_QM_CQ_RD_RATE_LIM_SAT */
+#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* MME_QM_CQ_RD_RATE_LIM_TOUT */
+#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* MME_QM_CQ_IFIFO_CNT */
+#define MME_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define MME_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* MME_QM_CP_MSG_BASE0_ADDR_LO */
+#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE0_ADDR_HI */
+#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE1_ADDR_LO */
+#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE1_ADDR_HI */
+#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE2_ADDR_LO */
+#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE2_ADDR_HI */
+#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE3_ADDR_LO */
+#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE3_ADDR_HI */
+#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_TSIZE_OFFSET */
+#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_DST_BASE_HI_OFFSET */
+#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_COMMIT_OFFSET */
+#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_FENCE0_RDATA */
+#define MME_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE1_RDATA */
+#define MME_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE2_RDATA */
+#define MME_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE3_RDATA */
+#define MME_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE0_CNT */
+#define MME_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_FENCE1_CNT */
+#define MME_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_FENCE2_CNT */
+#define MME_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_FENCE3_CNT */
+#define MME_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_STS */
+#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_QM_CP_STS_ERDY_SHIFT 16
+#define MME_QM_CP_STS_ERDY_MASK 0x10000
+#define MME_QM_CP_STS_RRDY_SHIFT 17
+#define MME_QM_CP_STS_RRDY_MASK 0x20000
+#define MME_QM_CP_STS_MRDY_SHIFT 18
+#define MME_QM_CP_STS_MRDY_MASK 0x40000
+#define MME_QM_CP_STS_SW_STOP_SHIFT 19
+#define MME_QM_CP_STS_SW_STOP_MASK 0x80000
+#define MME_QM_CP_STS_FENCE_ID_SHIFT 20
+#define MME_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define MME_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define MME_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* MME_QM_CP_CURRENT_INST_LO */
+#define MME_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define MME_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_CURRENT_INST_HI */
+#define MME_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define MME_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_BARRIER_CFG */
+#define MME_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define MME_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* MME_QM_CP_DBG_0 */
+#define MME_QM_CP_DBG_0_VAL_SHIFT 0
+#define MME_QM_CP_DBG_0_VAL_MASK 0xFF
+
+/* MME_QM_PQ_BUF_ADDR */
+#define MME_QM_PQ_BUF_ADDR_VAL_SHIFT 0
+#define MME_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_BUF_RDATA */
+#define MME_QM_PQ_BUF_RDATA_VAL_SHIFT 0
+#define MME_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_BUF_ADDR */
+#define MME_QM_CQ_BUF_ADDR_VAL_SHIFT 0
+#define MME_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_BUF_RDATA */
+#define MME_QM_CQ_BUF_RDATA_VAL_SHIFT 0
+#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_MME_QM_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
new file mode 100644
index 000000000000..b5b1c776f6c3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_QM_REGS_H_
+#define ASIC_REG_MME_QM_REGS_H_
+
+/*
+ *****************************************
+ * MME_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmMME_QM_GLBL_CFG0 0xD8000
+
+#define mmMME_QM_GLBL_CFG1 0xD8004
+
+#define mmMME_QM_GLBL_PROT 0xD8008
+
+#define mmMME_QM_GLBL_ERR_CFG 0xD800C
+
+#define mmMME_QM_GLBL_ERR_ADDR_LO 0xD8010
+
+#define mmMME_QM_GLBL_ERR_ADDR_HI 0xD8014
+
+#define mmMME_QM_GLBL_ERR_WDATA 0xD8018
+
+#define mmMME_QM_GLBL_SECURE_PROPS 0xD801C
+
+#define mmMME_QM_GLBL_NON_SECURE_PROPS 0xD8020
+
+#define mmMME_QM_GLBL_STS0 0xD8024
+
+#define mmMME_QM_GLBL_STS1 0xD8028
+
+#define mmMME_QM_PQ_BASE_LO 0xD8060
+
+#define mmMME_QM_PQ_BASE_HI 0xD8064
+
+#define mmMME_QM_PQ_SIZE 0xD8068
+
+#define mmMME_QM_PQ_PI 0xD806C
+
+#define mmMME_QM_PQ_CI 0xD8070
+
+#define mmMME_QM_PQ_CFG0 0xD8074
+
+#define mmMME_QM_PQ_CFG1 0xD8078
+
+#define mmMME_QM_PQ_ARUSER 0xD807C
+
+#define mmMME_QM_PQ_PUSH0 0xD8080
+
+#define mmMME_QM_PQ_PUSH1 0xD8084
+
+#define mmMME_QM_PQ_PUSH2 0xD8088
+
+#define mmMME_QM_PQ_PUSH3 0xD808C
+
+#define mmMME_QM_PQ_STS0 0xD8090
+
+#define mmMME_QM_PQ_STS1 0xD8094
+
+#define mmMME_QM_PQ_RD_RATE_LIM_EN 0xD80A0
+
+#define mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xD80A4
+
+#define mmMME_QM_PQ_RD_RATE_LIM_SAT 0xD80A8
+
+#define mmMME_QM_PQ_RD_RATE_LIM_TOUT 0xD80AC
+
+#define mmMME_QM_CQ_CFG0 0xD80B0
+
+#define mmMME_QM_CQ_CFG1 0xD80B4
+
+#define mmMME_QM_CQ_ARUSER 0xD80B8
+
+#define mmMME_QM_CQ_PTR_LO 0xD80C0
+
+#define mmMME_QM_CQ_PTR_HI 0xD80C4
+
+#define mmMME_QM_CQ_TSIZE 0xD80C8
+
+#define mmMME_QM_CQ_CTL 0xD80CC
+
+#define mmMME_QM_CQ_PTR_LO_STS 0xD80D4
+
+#define mmMME_QM_CQ_PTR_HI_STS 0xD80D8
+
+#define mmMME_QM_CQ_TSIZE_STS 0xD80DC
+
+#define mmMME_QM_CQ_CTL_STS 0xD80E0
+
+#define mmMME_QM_CQ_STS0 0xD80E4
+
+#define mmMME_QM_CQ_STS1 0xD80E8
+
+#define mmMME_QM_CQ_RD_RATE_LIM_EN 0xD80F0
+
+#define mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xD80F4
+
+#define mmMME_QM_CQ_RD_RATE_LIM_SAT 0xD80F8
+
+#define mmMME_QM_CQ_RD_RATE_LIM_TOUT 0xD80FC
+
+#define mmMME_QM_CQ_IFIFO_CNT 0xD8108
+
+#define mmMME_QM_CP_MSG_BASE0_ADDR_LO 0xD8120
+
+#define mmMME_QM_CP_MSG_BASE0_ADDR_HI 0xD8124
+
+#define mmMME_QM_CP_MSG_BASE1_ADDR_LO 0xD8128
+
+#define mmMME_QM_CP_MSG_BASE1_ADDR_HI 0xD812C
+
+#define mmMME_QM_CP_MSG_BASE2_ADDR_LO 0xD8130
+
+#define mmMME_QM_CP_MSG_BASE2_ADDR_HI 0xD8134
+
+#define mmMME_QM_CP_MSG_BASE3_ADDR_LO 0xD8138
+
+#define mmMME_QM_CP_MSG_BASE3_ADDR_HI 0xD813C
+
+#define mmMME_QM_CP_LDMA_TSIZE_OFFSET 0xD8140
+
+#define mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xD8144
+
+#define mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xD8148
+
+#define mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xD814C
+
+#define mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xD8150
+
+#define mmMME_QM_CP_LDMA_COMMIT_OFFSET 0xD8154
+
+#define mmMME_QM_CP_FENCE0_RDATA 0xD8158
+
+#define mmMME_QM_CP_FENCE1_RDATA 0xD815C
+
+#define mmMME_QM_CP_FENCE2_RDATA 0xD8160
+
+#define mmMME_QM_CP_FENCE3_RDATA 0xD8164
+
+#define mmMME_QM_CP_FENCE0_CNT 0xD8168
+
+#define mmMME_QM_CP_FENCE1_CNT 0xD816C
+
+#define mmMME_QM_CP_FENCE2_CNT 0xD8170
+
+#define mmMME_QM_CP_FENCE3_CNT 0xD8174
+
+#define mmMME_QM_CP_STS 0xD8178
+
+#define mmMME_QM_CP_CURRENT_INST_LO 0xD817C
+
+#define mmMME_QM_CP_CURRENT_INST_HI 0xD8180
+
+#define mmMME_QM_CP_BARRIER_CFG 0xD8184
+
+#define mmMME_QM_CP_DBG_0 0xD8188
+
+#define mmMME_QM_PQ_BUF_ADDR 0xD8300
+
+#define mmMME_QM_PQ_BUF_RDATA 0xD8304
+
+#define mmMME_QM_CQ_BUF_ADDR 0xD8308
+
+#define mmMME_QM_CQ_BUF_RDATA 0xD830C
+
+#endif /* ASIC_REG_MME_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
new file mode 100644
index 000000000000..9436b1e2705a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
@@ -0,0 +1,1153 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_REGS_H_
+#define ASIC_REG_MME_REGS_H_
+
+/*
+ *****************************************
+ * MME (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME_ARCH_STATUS 0xD0000
+
+#define mmMME_ARCH_A_BASE_ADDR_HIGH 0xD0008
+
+#define mmMME_ARCH_B_BASE_ADDR_HIGH 0xD000C
+
+#define mmMME_ARCH_CIN_BASE_ADDR_HIGH 0xD0010
+
+#define mmMME_ARCH_COUT_BASE_ADDR_HIGH 0xD0014
+
+#define mmMME_ARCH_BIAS_BASE_ADDR_HIGH 0xD0018
+
+#define mmMME_ARCH_A_BASE_ADDR_LOW 0xD001C
+
+#define mmMME_ARCH_B_BASE_ADDR_LOW 0xD0020
+
+#define mmMME_ARCH_CIN_BASE_ADDR_LOW 0xD0024
+
+#define mmMME_ARCH_COUT_BASE_ADDR_LOW 0xD0028
+
+#define mmMME_ARCH_BIAS_BASE_ADDR_LOW 0xD002C
+
+#define mmMME_ARCH_HEADER 0xD0030
+
+#define mmMME_ARCH_KERNEL_SIZE_MINUS_1 0xD0034
+
+#define mmMME_ARCH_ASSOCIATED_DIMS_0 0xD0038
+
+#define mmMME_ARCH_ASSOCIATED_DIMS_1 0xD003C
+
+#define mmMME_ARCH_COUT_SCALE 0xD0040
+
+#define mmMME_ARCH_CIN_SCALE 0xD0044
+
+#define mmMME_ARCH_GEMMLOWP_ZP 0xD0048
+
+#define mmMME_ARCH_GEMMLOWP_EXPONENT 0xD004C
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_0 0xD0050
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_1 0xD0054
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_2 0xD0058
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_3 0xD005C
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_4 0xD0060
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_0 0xD0064
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_1 0xD0068
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_2 0xD006C
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_3 0xD0070
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_4 0xD0074
+
+#define mmMME_ARCH_A_LOOP_STRIDE_0 0xD0078
+
+#define mmMME_ARCH_A_LOOP_STRIDE_1 0xD007C
+
+#define mmMME_ARCH_A_LOOP_STRIDE_2 0xD0080
+
+#define mmMME_ARCH_A_LOOP_STRIDE_3 0xD0084
+
+#define mmMME_ARCH_A_LOOP_STRIDE_4 0xD0088
+
+#define mmMME_ARCH_A_ROI_SIZE_0 0xD008C
+
+#define mmMME_ARCH_A_ROI_SIZE_1 0xD0090
+
+#define mmMME_ARCH_A_ROI_SIZE_2 0xD0094
+
+#define mmMME_ARCH_A_ROI_SIZE_3 0xD0098
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_0 0xD009C
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_1 0xD00A0
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_2 0xD00A4
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_3 0xD00A8
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_0 0xD00AC
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_1 0xD00B0
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_2 0xD00B4
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_3 0xD00B8
+
+#define mmMME_ARCH_A_SPATIAL_SIZE_MINUS_1 0xD00BC
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_0 0xD00C0
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_1 0xD00C4
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_2 0xD00C8
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_3 0xD00CC
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_4 0xD00D0
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_0 0xD00D4
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_1 0xD00D8
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_2 0xD00DC
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_3 0xD00E0
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_4 0xD00E4
+
+#define mmMME_ARCH_B_LOOP_STRIDE_0 0xD00E8
+
+#define mmMME_ARCH_B_LOOP_STRIDE_1 0xD00EC
+
+#define mmMME_ARCH_B_LOOP_STRIDE_2 0xD00F0
+
+#define mmMME_ARCH_B_LOOP_STRIDE_3 0xD00F4
+
+#define mmMME_ARCH_B_LOOP_STRIDE_4 0xD00F8
+
+#define mmMME_ARCH_B_ROI_SIZE_0 0xD00FC
+
+#define mmMME_ARCH_B_ROI_SIZE_1 0xD0100
+
+#define mmMME_ARCH_B_ROI_SIZE_2 0xD0104
+
+#define mmMME_ARCH_B_ROI_SIZE_3 0xD0108
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_0 0xD010C
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_1 0xD0110
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_2 0xD0114
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_3 0xD0118
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_0 0xD011C
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_1 0xD0120
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_2 0xD0124
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_3 0xD0128
+
+#define mmMME_ARCH_B_SPATIAL_SIZE_MINUS_1 0xD012C
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_0 0xD0130
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_1 0xD0134
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_2 0xD0138
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_3 0xD013C
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_4 0xD0140
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_0 0xD0144
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_1 0xD0148
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_2 0xD014C
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_3 0xD0150
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_4 0xD0154
+
+#define mmMME_ARCH_C_LOOP_STRIDE_0 0xD0158
+
+#define mmMME_ARCH_C_LOOP_STRIDE_1 0xD015C
+
+#define mmMME_ARCH_C_LOOP_STRIDE_2 0xD0160
+
+#define mmMME_ARCH_C_LOOP_STRIDE_3 0xD0164
+
+#define mmMME_ARCH_C_LOOP_STRIDE_4 0xD0168
+
+#define mmMME_ARCH_C_ROI_SIZE_0 0xD016C
+
+#define mmMME_ARCH_C_ROI_SIZE_1 0xD0170
+
+#define mmMME_ARCH_C_ROI_SIZE_2 0xD0174
+
+#define mmMME_ARCH_C_ROI_SIZE_3 0xD0178
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_0 0xD017C
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_1 0xD0180
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_2 0xD0184
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_3 0xD0188
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_0 0xD018C
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_1 0xD0190
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_2 0xD0194
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_3 0xD0198
+
+#define mmMME_ARCH_C_SPATIAL_SIZE_MINUS_1 0xD019C
+
+#define mmMME_ARCH_SYNC_OBJECT_MESSAGE 0xD01A0
+
+#define mmMME_ARCH_E_PADDING_VALUE_A 0xD01A4
+
+#define mmMME_ARCH_E_NUM_ITERATION_MINUS_1 0xD01A8
+
+#define mmMME_ARCH_E_BUBBLES_PER_SPLIT 0xD01AC
+
+#define mmMME_CMD 0xD0200
+
+#define mmMME_DUMMY 0xD0204
+
+#define mmMME_RESET 0xD0208
+
+#define mmMME_STALL 0xD020C
+
+#define mmMME_SM_BASE_ADDRESS_LOW 0xD0210
+
+#define mmMME_SM_BASE_ADDRESS_HIGH 0xD0214
+
+#define mmMME_DBGMEM_ADD 0xD0218
+
+#define mmMME_DBGMEM_DATA_WR 0xD021C
+
+#define mmMME_DBGMEM_DATA_RD 0xD0220
+
+#define mmMME_DBGMEM_CTRL 0xD0224
+
+#define mmMME_DBGMEM_RC 0xD0228
+
+#define mmMME_LOG_SHADOW 0xD022C
+
+#define mmMME_STORE_MAX_CREDIT 0xD0300
+
+#define mmMME_AGU 0xD0304
+
+#define mmMME_SBA 0xD0308
+
+#define mmMME_SBB 0xD030C
+
+#define mmMME_SBC 0xD0310
+
+#define mmMME_WBC 0xD0314
+
+#define mmMME_SBA_CONTROL_DATA 0xD0318
+
+#define mmMME_SBB_CONTROL_DATA 0xD031C
+
+#define mmMME_SBC_CONTROL_DATA 0xD0320
+
+#define mmMME_WBC_CONTROL_DATA 0xD0324
+
+#define mmMME_TE 0xD0328
+
+#define mmMME_TE2DEC 0xD032C
+
+#define mmMME_REI_STATUS 0xD0330
+
+#define mmMME_REI_MASK 0xD0334
+
+#define mmMME_SEI_STATUS 0xD0338
+
+#define mmMME_SEI_MASK 0xD033C
+
+#define mmMME_SPI_STATUS 0xD0340
+
+#define mmMME_SPI_MASK 0xD0344
+
+#define mmMME_SHADOW_0_STATUS 0xD0400
+
+#define mmMME_SHADOW_0_A_BASE_ADDR_HIGH 0xD0408
+
+#define mmMME_SHADOW_0_B_BASE_ADDR_HIGH 0xD040C
+
+#define mmMME_SHADOW_0_CIN_BASE_ADDR_HIGH 0xD0410
+
+#define mmMME_SHADOW_0_COUT_BASE_ADDR_HIGH 0xD0414
+
+#define mmMME_SHADOW_0_BIAS_BASE_ADDR_HIGH 0xD0418
+
+#define mmMME_SHADOW_0_A_BASE_ADDR_LOW 0xD041C
+
+#define mmMME_SHADOW_0_B_BASE_ADDR_LOW 0xD0420
+
+#define mmMME_SHADOW_0_CIN_BASE_ADDR_LOW 0xD0424
+
+#define mmMME_SHADOW_0_COUT_BASE_ADDR_LOW 0xD0428
+
+#define mmMME_SHADOW_0_BIAS_BASE_ADDR_LOW 0xD042C
+
+#define mmMME_SHADOW_0_HEADER 0xD0430
+
+#define mmMME_SHADOW_0_KERNEL_SIZE_MINUS_1 0xD0434
+
+#define mmMME_SHADOW_0_ASSOCIATED_DIMS_0 0xD0438
+
+#define mmMME_SHADOW_0_ASSOCIATED_DIMS_1 0xD043C
+
+#define mmMME_SHADOW_0_COUT_SCALE 0xD0440
+
+#define mmMME_SHADOW_0_CIN_SCALE 0xD0444
+
+#define mmMME_SHADOW_0_GEMMLOWP_ZP 0xD0448
+
+#define mmMME_SHADOW_0_GEMMLOWP_EXPONENT 0xD044C
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_0 0xD0450
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_1 0xD0454
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_2 0xD0458
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_3 0xD045C
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_4 0xD0460
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_0 0xD0464
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_1 0xD0468
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_2 0xD046C
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_3 0xD0470
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_4 0xD0474
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_0 0xD0478
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_1 0xD047C
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_2 0xD0480
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_3 0xD0484
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_4 0xD0488
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_0 0xD048C
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_1 0xD0490
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_2 0xD0494
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_3 0xD0498
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_0 0xD049C
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_1 0xD04A0
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_2 0xD04A4
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_3 0xD04A8
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_0 0xD04AC
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_1 0xD04B0
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_2 0xD04B4
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_3 0xD04B8
+
+#define mmMME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 0xD04BC
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_0 0xD04C0
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_1 0xD04C4
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_2 0xD04C8
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_3 0xD04CC
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_4 0xD04D0
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_0 0xD04D4
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_1 0xD04D8
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_2 0xD04DC
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_3 0xD04E0
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_4 0xD04E4
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_0 0xD04E8
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_1 0xD04EC
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_2 0xD04F0
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_3 0xD04F4
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_4 0xD04F8
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_0 0xD04FC
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_1 0xD0500
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_2 0xD0504
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_3 0xD0508
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_0 0xD050C
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_1 0xD0510
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_2 0xD0514
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_3 0xD0518
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_0 0xD051C
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_1 0xD0520
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_2 0xD0524
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_3 0xD0528
+
+#define mmMME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 0xD052C
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_0 0xD0530
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_1 0xD0534
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_2 0xD0538
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_3 0xD053C
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_4 0xD0540
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_0 0xD0544
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_1 0xD0548
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_2 0xD054C
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_3 0xD0550
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_4 0xD0554
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_0 0xD0558
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_1 0xD055C
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_2 0xD0560
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_3 0xD0564
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_4 0xD0568
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_0 0xD056C
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_1 0xD0570
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_2 0xD0574
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_3 0xD0578
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_0 0xD057C
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_1 0xD0580
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_2 0xD0584
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_3 0xD0588
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_0 0xD058C
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_1 0xD0590
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_2 0xD0594
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_3 0xD0598
+
+#define mmMME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 0xD059C
+
+#define mmMME_SHADOW_0_SYNC_OBJECT_MESSAGE 0xD05A0
+
+#define mmMME_SHADOW_0_E_PADDING_VALUE_A 0xD05A4
+
+#define mmMME_SHADOW_0_E_NUM_ITERATION_MINUS_1 0xD05A8
+
+#define mmMME_SHADOW_0_E_BUBBLES_PER_SPLIT 0xD05AC
+
+#define mmMME_SHADOW_1_STATUS 0xD0600
+
+#define mmMME_SHADOW_1_A_BASE_ADDR_HIGH 0xD0608
+
+#define mmMME_SHADOW_1_B_BASE_ADDR_HIGH 0xD060C
+
+#define mmMME_SHADOW_1_CIN_BASE_ADDR_HIGH 0xD0610
+
+#define mmMME_SHADOW_1_COUT_BASE_ADDR_HIGH 0xD0614
+
+#define mmMME_SHADOW_1_BIAS_BASE_ADDR_HIGH 0xD0618
+
+#define mmMME_SHADOW_1_A_BASE_ADDR_LOW 0xD061C
+
+#define mmMME_SHADOW_1_B_BASE_ADDR_LOW 0xD0620
+
+#define mmMME_SHADOW_1_CIN_BASE_ADDR_LOW 0xD0624
+
+#define mmMME_SHADOW_1_COUT_BASE_ADDR_LOW 0xD0628
+
+#define mmMME_SHADOW_1_BIAS_BASE_ADDR_LOW 0xD062C
+
+#define mmMME_SHADOW_1_HEADER 0xD0630
+
+#define mmMME_SHADOW_1_KERNEL_SIZE_MINUS_1 0xD0634
+
+#define mmMME_SHADOW_1_ASSOCIATED_DIMS_0 0xD0638
+
+#define mmMME_SHADOW_1_ASSOCIATED_DIMS_1 0xD063C
+
+#define mmMME_SHADOW_1_COUT_SCALE 0xD0640
+
+#define mmMME_SHADOW_1_CIN_SCALE 0xD0644
+
+#define mmMME_SHADOW_1_GEMMLOWP_ZP 0xD0648
+
+#define mmMME_SHADOW_1_GEMMLOWP_EXPONENT 0xD064C
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_0 0xD0650
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_1 0xD0654
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_2 0xD0658
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_3 0xD065C
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_4 0xD0660
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_0 0xD0664
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_1 0xD0668
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_2 0xD066C
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_3 0xD0670
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_4 0xD0674
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_0 0xD0678
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_1 0xD067C
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_2 0xD0680
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_3 0xD0684
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_4 0xD0688
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_0 0xD068C
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_1 0xD0690
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_2 0xD0694
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_3 0xD0698
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_0 0xD069C
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_1 0xD06A0
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_2 0xD06A4
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_3 0xD06A8
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_0 0xD06AC
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_1 0xD06B0
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_2 0xD06B4
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_3 0xD06B8
+
+#define mmMME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 0xD06BC
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_0 0xD06C0
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_1 0xD06C4
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_2 0xD06C8
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_3 0xD06CC
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_4 0xD06D0
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_0 0xD06D4
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_1 0xD06D8
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_2 0xD06DC
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_3 0xD06E0
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_4 0xD06E4
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_0 0xD06E8
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_1 0xD06EC
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_2 0xD06F0
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_3 0xD06F4
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_4 0xD06F8
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_0 0xD06FC
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_1 0xD0700
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_2 0xD0704
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_3 0xD0708
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_0 0xD070C
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_1 0xD0710
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_2 0xD0714
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_3 0xD0718
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_0 0xD071C
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_1 0xD0720
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_2 0xD0724
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_3 0xD0728
+
+#define mmMME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 0xD072C
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_0 0xD0730
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_1 0xD0734
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_2 0xD0738
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_3 0xD073C
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_4 0xD0740
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_0 0xD0744
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_1 0xD0748
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_2 0xD074C
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_3 0xD0750
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_4 0xD0754
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_0 0xD0758
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_1 0xD075C
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_2 0xD0760
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_3 0xD0764
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_4 0xD0768
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_0 0xD076C
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_1 0xD0770
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_2 0xD0774
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_3 0xD0778
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_0 0xD077C
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_1 0xD0780
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_2 0xD0784
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_3 0xD0788
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_0 0xD078C
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_1 0xD0790
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_2 0xD0794
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_3 0xD0798
+
+#define mmMME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 0xD079C
+
+#define mmMME_SHADOW_1_SYNC_OBJECT_MESSAGE 0xD07A0
+
+#define mmMME_SHADOW_1_E_PADDING_VALUE_A 0xD07A4
+
+#define mmMME_SHADOW_1_E_NUM_ITERATION_MINUS_1 0xD07A8
+
+#define mmMME_SHADOW_1_E_BUBBLES_PER_SPLIT 0xD07AC
+
+#define mmMME_SHADOW_2_STATUS 0xD0800
+
+#define mmMME_SHADOW_2_A_BASE_ADDR_HIGH 0xD0808
+
+#define mmMME_SHADOW_2_B_BASE_ADDR_HIGH 0xD080C
+
+#define mmMME_SHADOW_2_CIN_BASE_ADDR_HIGH 0xD0810
+
+#define mmMME_SHADOW_2_COUT_BASE_ADDR_HIGH 0xD0814
+
+#define mmMME_SHADOW_2_BIAS_BASE_ADDR_HIGH 0xD0818
+
+#define mmMME_SHADOW_2_A_BASE_ADDR_LOW 0xD081C
+
+#define mmMME_SHADOW_2_B_BASE_ADDR_LOW 0xD0820
+
+#define mmMME_SHADOW_2_CIN_BASE_ADDR_LOW 0xD0824
+
+#define mmMME_SHADOW_2_COUT_BASE_ADDR_LOW 0xD0828
+
+#define mmMME_SHADOW_2_BIAS_BASE_ADDR_LOW 0xD082C
+
+#define mmMME_SHADOW_2_HEADER 0xD0830
+
+#define mmMME_SHADOW_2_KERNEL_SIZE_MINUS_1 0xD0834
+
+#define mmMME_SHADOW_2_ASSOCIATED_DIMS_0 0xD0838
+
+#define mmMME_SHADOW_2_ASSOCIATED_DIMS_1 0xD083C
+
+#define mmMME_SHADOW_2_COUT_SCALE 0xD0840
+
+#define mmMME_SHADOW_2_CIN_SCALE 0xD0844
+
+#define mmMME_SHADOW_2_GEMMLOWP_ZP 0xD0848
+
+#define mmMME_SHADOW_2_GEMMLOWP_EXPONENT 0xD084C
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_0 0xD0850
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_1 0xD0854
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_2 0xD0858
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_3 0xD085C
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_4 0xD0860
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_0 0xD0864
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_1 0xD0868
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_2 0xD086C
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_3 0xD0870
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_4 0xD0874
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_0 0xD0878
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_1 0xD087C
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_2 0xD0880
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_3 0xD0884
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_4 0xD0888
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_0 0xD088C
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_1 0xD0890
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_2 0xD0894
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_3 0xD0898
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_0 0xD089C
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_1 0xD08A0
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_2 0xD08A4
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_3 0xD08A8
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_0 0xD08AC
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_1 0xD08B0
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_2 0xD08B4
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_3 0xD08B8
+
+#define mmMME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 0xD08BC
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_0 0xD08C0
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_1 0xD08C4
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_2 0xD08C8
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_3 0xD08CC
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_4 0xD08D0
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_0 0xD08D4
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_1 0xD08D8
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_2 0xD08DC
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_3 0xD08E0
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_4 0xD08E4
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_0 0xD08E8
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_1 0xD08EC
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_2 0xD08F0
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_3 0xD08F4
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_4 0xD08F8
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_0 0xD08FC
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_1 0xD0900
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_2 0xD0904
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_3 0xD0908
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_0 0xD090C
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_1 0xD0910
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_2 0xD0914
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_3 0xD0918
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_0 0xD091C
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_1 0xD0920
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_2 0xD0924
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_3 0xD0928
+
+#define mmMME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 0xD092C
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_0 0xD0930
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_1 0xD0934
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_2 0xD0938
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_3 0xD093C
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_4 0xD0940
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_0 0xD0944
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_1 0xD0948
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_2 0xD094C
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_3 0xD0950
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_4 0xD0954
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_0 0xD0958
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_1 0xD095C
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_2 0xD0960
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_3 0xD0964
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_4 0xD0968
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_0 0xD096C
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_1 0xD0970
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_2 0xD0974
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_3 0xD0978
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_0 0xD097C
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_1 0xD0980
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_2 0xD0984
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_3 0xD0988
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_0 0xD098C
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_1 0xD0990
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_2 0xD0994
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_3 0xD0998
+
+#define mmMME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 0xD099C
+
+#define mmMME_SHADOW_2_SYNC_OBJECT_MESSAGE 0xD09A0
+
+#define mmMME_SHADOW_2_E_PADDING_VALUE_A 0xD09A4
+
+#define mmMME_SHADOW_2_E_NUM_ITERATION_MINUS_1 0xD09A8
+
+#define mmMME_SHADOW_2_E_BUBBLES_PER_SPLIT 0xD09AC
+
+#define mmMME_SHADOW_3_STATUS 0xD0A00
+
+#define mmMME_SHADOW_3_A_BASE_ADDR_HIGH 0xD0A08
+
+#define mmMME_SHADOW_3_B_BASE_ADDR_HIGH 0xD0A0C
+
+#define mmMME_SHADOW_3_CIN_BASE_ADDR_HIGH 0xD0A10
+
+#define mmMME_SHADOW_3_COUT_BASE_ADDR_HIGH 0xD0A14
+
+#define mmMME_SHADOW_3_BIAS_BASE_ADDR_HIGH 0xD0A18
+
+#define mmMME_SHADOW_3_A_BASE_ADDR_LOW 0xD0A1C
+
+#define mmMME_SHADOW_3_B_BASE_ADDR_LOW 0xD0A20
+
+#define mmMME_SHADOW_3_CIN_BASE_ADDR_LOW 0xD0A24
+
+#define mmMME_SHADOW_3_COUT_BASE_ADDR_LOW 0xD0A28
+
+#define mmMME_SHADOW_3_BIAS_BASE_ADDR_LOW 0xD0A2C
+
+#define mmMME_SHADOW_3_HEADER 0xD0A30
+
+#define mmMME_SHADOW_3_KERNEL_SIZE_MINUS_1 0xD0A34
+
+#define mmMME_SHADOW_3_ASSOCIATED_DIMS_0 0xD0A38
+
+#define mmMME_SHADOW_3_ASSOCIATED_DIMS_1 0xD0A3C
+
+#define mmMME_SHADOW_3_COUT_SCALE 0xD0A40
+
+#define mmMME_SHADOW_3_CIN_SCALE 0xD0A44
+
+#define mmMME_SHADOW_3_GEMMLOWP_ZP 0xD0A48
+
+#define mmMME_SHADOW_3_GEMMLOWP_EXPONENT 0xD0A4C
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_0 0xD0A50
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_1 0xD0A54
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_2 0xD0A58
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_3 0xD0A5C
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_4 0xD0A60
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_0 0xD0A64
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_1 0xD0A68
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_2 0xD0A6C
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_3 0xD0A70
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_4 0xD0A74
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_0 0xD0A78
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_1 0xD0A7C
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_2 0xD0A80
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_3 0xD0A84
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_4 0xD0A88
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_0 0xD0A8C
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_1 0xD0A90
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_2 0xD0A94
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_3 0xD0A98
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_0 0xD0A9C
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_1 0xD0AA0
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_2 0xD0AA4
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_3 0xD0AA8
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_0 0xD0AAC
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_1 0xD0AB0
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_2 0xD0AB4
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_3 0xD0AB8
+
+#define mmMME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 0xD0ABC
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_0 0xD0AC0
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_1 0xD0AC4
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_2 0xD0AC8
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_3 0xD0ACC
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_4 0xD0AD0
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_0 0xD0AD4
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_1 0xD0AD8
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_2 0xD0ADC
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_3 0xD0AE0
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_4 0xD0AE4
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_0 0xD0AE8
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_1 0xD0AEC
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_2 0xD0AF0
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_3 0xD0AF4
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_4 0xD0AF8
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_0 0xD0AFC
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_1 0xD0B00
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_2 0xD0B04
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_3 0xD0B08
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_0 0xD0B0C
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_1 0xD0B10
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_2 0xD0B14
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_3 0xD0B18
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_0 0xD0B1C
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_1 0xD0B20
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_2 0xD0B24
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_3 0xD0B28
+
+#define mmMME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 0xD0B2C
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_0 0xD0B30
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_1 0xD0B34
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_2 0xD0B38
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_3 0xD0B3C
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_4 0xD0B40
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_0 0xD0B44
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_1 0xD0B48
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_2 0xD0B4C
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_3 0xD0B50
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_4 0xD0B54
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_0 0xD0B58
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_1 0xD0B5C
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_2 0xD0B60
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_3 0xD0B64
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_4 0xD0B68
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_0 0xD0B6C
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_1 0xD0B70
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_2 0xD0B74
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_3 0xD0B78
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_0 0xD0B7C
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_1 0xD0B80
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_2 0xD0B84
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_3 0xD0B88
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_0 0xD0B8C
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_1 0xD0B90
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_2 0xD0B94
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_3 0xD0B98
+
+#define mmMME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 0xD0B9C
+
+#define mmMME_SHADOW_3_SYNC_OBJECT_MESSAGE 0xD0BA0
+
+#define mmMME_SHADOW_3_E_PADDING_VALUE_A 0xD0BA4
+
+#define mmMME_SHADOW_3_E_NUM_ITERATION_MINUS_1 0xD0BA8
+
+#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC
+
+#endif /* ASIC_REG_MME_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
new file mode 100644
index 000000000000..3a78078d3c4c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MMU_MASKS_H_
+#define ASIC_REG_MMU_MASKS_H_
+
+/*
+ *****************************************
+ * MMU (Prototype: MMU)
+ *****************************************
+ */
+
+/* MMU_INPUT_FIFO_THRESHOLD */
+#define MMU_INPUT_FIFO_THRESHOLD_PCI_SHIFT 0
+#define MMU_INPUT_FIFO_THRESHOLD_PCI_MASK 0x7
+#define MMU_INPUT_FIFO_THRESHOLD_PSOC_SHIFT 4
+#define MMU_INPUT_FIFO_THRESHOLD_PSOC_MASK 0x70
+#define MMU_INPUT_FIFO_THRESHOLD_DMA_SHIFT 8
+#define MMU_INPUT_FIFO_THRESHOLD_DMA_MASK 0x700
+#define MMU_INPUT_FIFO_THRESHOLD_CPU_SHIFT 12
+#define MMU_INPUT_FIFO_THRESHOLD_CPU_MASK 0x7000
+#define MMU_INPUT_FIFO_THRESHOLD_MME_SHIFT 16
+#define MMU_INPUT_FIFO_THRESHOLD_MME_MASK 0x70000
+#define MMU_INPUT_FIFO_THRESHOLD_TPC_SHIFT 20
+#define MMU_INPUT_FIFO_THRESHOLD_TPC_MASK 0x700000
+#define MMU_INPUT_FIFO_THRESHOLD_OTHER_SHIFT 24
+#define MMU_INPUT_FIFO_THRESHOLD_OTHER_MASK 0x7000000
+
+/* MMU_MMU_ENABLE */
+#define MMU_MMU_ENABLE_R_SHIFT 0
+#define MMU_MMU_ENABLE_R_MASK 0x1
+
+/* MMU_FORCE_ORDERING */
+#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_SHIFT 0
+#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_MASK 0x1
+#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_SHIFT 1
+#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_MASK 0x2
+#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_SHIFT 2
+#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_MASK 0x4
+#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_SHIFT 3
+#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_MASK 0x8
+#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_SHIFT 4
+#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_MASK 0x10
+#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_SHIFT 5
+#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_MASK 0x20
+#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_SHIFT 6
+#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_MASK 0x40
+#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_SHIFT 8
+#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_MASK 0x100
+#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_SHIFT 9
+#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_MASK 0x200
+#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_SHIFT 10
+#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_MASK 0x400
+#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_SHIFT 11
+#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_MASK 0x800
+#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_SHIFT 12
+#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_MASK 0x1000
+#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_SHIFT 13
+#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_MASK 0x2000
+#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_SHIFT 14
+#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_MASK 0x4000
+
+/* MMU_FEATURE_ENABLE */
+#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_SHIFT 0
+#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_MASK 0x1
+#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_SHIFT 1
+#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_MASK 0x2
+#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_SHIFT 2
+#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_MASK 0x4
+#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_SHIFT 3
+#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_MASK 0x8
+#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_SHIFT 4
+#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_MASK 0x10
+#define MMU_FEATURE_ENABLE_TRACE_ENABLE_SHIFT 5
+#define MMU_FEATURE_ENABLE_TRACE_ENABLE_MASK 0x20
+
+/* MMU_VA_ORDERING_MASK_31_7 */
+#define MMU_VA_ORDERING_MASK_31_7_R_SHIFT 0
+#define MMU_VA_ORDERING_MASK_31_7_R_MASK 0x1FFFFFF
+
+/* MMU_VA_ORDERING_MASK_49_32 */
+#define MMU_VA_ORDERING_MASK_49_32_R_SHIFT 0
+#define MMU_VA_ORDERING_MASK_49_32_R_MASK 0x3FFFF
+
+/* MMU_LOG2_DDR_SIZE */
+#define MMU_LOG2_DDR_SIZE_R_SHIFT 0
+#define MMU_LOG2_DDR_SIZE_R_MASK 0xFF
+
+/* MMU_SCRAMBLER */
+#define MMU_SCRAMBLER_ADDR_BIT_SHIFT 0
+#define MMU_SCRAMBLER_ADDR_BIT_MASK 0x3F
+#define MMU_SCRAMBLER_SINGLE_DDR_EN_SHIFT 6
+#define MMU_SCRAMBLER_SINGLE_DDR_EN_MASK 0x40
+#define MMU_SCRAMBLER_SINGLE_DDR_ID_SHIFT 7
+#define MMU_SCRAMBLER_SINGLE_DDR_ID_MASK 0x80
+
+/* MMU_MEM_INIT_BUSY */
+#define MMU_MEM_INIT_BUSY_DATA_SHIFT 0
+#define MMU_MEM_INIT_BUSY_DATA_MASK 0x3
+#define MMU_MEM_INIT_BUSY_OBI0_SHIFT 2
+#define MMU_MEM_INIT_BUSY_OBI0_MASK 0x4
+#define MMU_MEM_INIT_BUSY_OBI1_SHIFT 3
+#define MMU_MEM_INIT_BUSY_OBI1_MASK 0x8
+
+/* MMU_SPI_MASK */
+#define MMU_SPI_MASK_R_SHIFT 0
+#define MMU_SPI_MASK_R_MASK 0xFF
+
+/* MMU_SPI_CAUSE */
+#define MMU_SPI_CAUSE_R_SHIFT 0
+#define MMU_SPI_CAUSE_R_MASK 0xFF
+
+/* MMU_PAGE_ERROR_CAPTURE */
+#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_SHIFT 0
+#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18
+#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+/* MMU_PAGE_ERROR_CAPTURE_VA */
+#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
+#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
+
+/* MMU_ACCESS_ERROR_CAPTURE */
+#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_SHIFT 0
+#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18
+#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+/* MMU_ACCESS_ERROR_CAPTURE_VA */
+#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
+#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_MMU_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
new file mode 100644
index 000000000000..bec6c014135c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MMU_REGS_H_
+#define ASIC_REG_MMU_REGS_H_
+
+/*
+ *****************************************
+ * MMU (Prototype: MMU)
+ *****************************************
+ */
+
+#define mmMMU_INPUT_FIFO_THRESHOLD 0x480000
+
+#define mmMMU_MMU_ENABLE 0x48000C
+
+#define mmMMU_FORCE_ORDERING 0x480010
+
+#define mmMMU_FEATURE_ENABLE 0x480014
+
+#define mmMMU_VA_ORDERING_MASK_31_7 0x480018
+
+#define mmMMU_VA_ORDERING_MASK_49_32 0x48001C
+
+#define mmMMU_LOG2_DDR_SIZE 0x480020
+
+#define mmMMU_SCRAMBLER 0x480024
+
+#define mmMMU_MEM_INIT_BUSY 0x480028
+
+#define mmMMU_SPI_MASK 0x48002C
+
+#define mmMMU_SPI_CAUSE 0x480030
+
+#define mmMMU_PAGE_ERROR_CAPTURE 0x480034
+
+#define mmMMU_PAGE_ERROR_CAPTURE_VA 0x480038
+
+#define mmMMU_ACCESS_ERROR_CAPTURE 0x48003C
+
+#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040
+
+#endif /* ASIC_REG_MMU_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
new file mode 100644
index 000000000000..209e41402a11
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCI_NRTR_MASKS_H_
+#define ASIC_REG_PCI_NRTR_MASKS_H_
+
+/*
+ *****************************************
+ * PCI_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+/* PCI_NRTR_HBW_MAX_CRED */
+#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
+#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define PCI_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
+#define PCI_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
+#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define PCI_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
+#define PCI_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* PCI_NRTR_LBW_MAX_CRED */
+#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
+#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define PCI_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
+#define PCI_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
+#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define PCI_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
+#define PCI_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* PCI_NRTR_DBG_E_ARB */
+#define PCI_NRTR_DBG_E_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_E_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_E_ARB_S_SHIFT 8
+#define PCI_NRTR_DBG_E_ARB_S_MASK 0x700
+#define PCI_NRTR_DBG_E_ARB_N_SHIFT 16
+#define PCI_NRTR_DBG_E_ARB_N_MASK 0x70000
+#define PCI_NRTR_DBG_E_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_W_ARB */
+#define PCI_NRTR_DBG_W_ARB_E_SHIFT 0
+#define PCI_NRTR_DBG_W_ARB_E_MASK 0x7
+#define PCI_NRTR_DBG_W_ARB_S_SHIFT 8
+#define PCI_NRTR_DBG_W_ARB_S_MASK 0x700
+#define PCI_NRTR_DBG_W_ARB_N_SHIFT 16
+#define PCI_NRTR_DBG_W_ARB_N_MASK 0x70000
+#define PCI_NRTR_DBG_W_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_N_ARB */
+#define PCI_NRTR_DBG_N_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_N_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_N_ARB_E_SHIFT 8
+#define PCI_NRTR_DBG_N_ARB_E_MASK 0x700
+#define PCI_NRTR_DBG_N_ARB_S_SHIFT 16
+#define PCI_NRTR_DBG_N_ARB_S_MASK 0x70000
+#define PCI_NRTR_DBG_N_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_S_ARB */
+#define PCI_NRTR_DBG_S_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_S_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_S_ARB_E_SHIFT 8
+#define PCI_NRTR_DBG_S_ARB_E_MASK 0x700
+#define PCI_NRTR_DBG_S_ARB_N_SHIFT 16
+#define PCI_NRTR_DBG_S_ARB_N_MASK 0x70000
+#define PCI_NRTR_DBG_S_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_L_ARB */
+#define PCI_NRTR_DBG_L_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_L_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_L_ARB_E_SHIFT 8
+#define PCI_NRTR_DBG_L_ARB_E_MASK 0x700
+#define PCI_NRTR_DBG_L_ARB_S_SHIFT 16
+#define PCI_NRTR_DBG_L_ARB_S_MASK 0x70000
+#define PCI_NRTR_DBG_L_ARB_N_SHIFT 24
+#define PCI_NRTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* PCI_NRTR_DBG_E_ARB_MAX */
+#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_W_ARB_MAX */
+#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_N_ARB_MAX */
+#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_S_ARB_MAX */
+#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_L_ARB_MAX */
+#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_SPLIT_COEF */
+#define PCI_NRTR_SPLIT_COEF_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_CFG */
+#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
+#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
+#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
+#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
+#define PCI_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define PCI_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* PCI_NRTR_SPLIT_RD_SAT */
+#define PCI_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_RD_RST_TOKEN */
+#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_RD_TIMEOUT */
+#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_SPLIT_WR_SAT */
+#define PCI_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_WPLIT_WR_TST_TOLEN */
+#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_WR_TIMEOUT */
+#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_HBW_RANGE_HIT */
+#define PCI_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* PCI_NRTR_HBW_RANGE_MASK_L */
+#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_HBW_RANGE_MASK_H */
+#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* PCI_NRTR_HBW_RANGE_BASE_L */
+#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_HBW_RANGE_BASE_H */
+#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* PCI_NRTR_LBW_RANGE_HIT */
+#define PCI_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define PCI_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* PCI_NRTR_LBW_RANGE_MASK */
+#define PCI_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define PCI_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* PCI_NRTR_LBW_RANGE_BASE */
+#define PCI_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define PCI_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* PCI_NRTR_RGLTR */
+#define PCI_NRTR_RGLTR_WR_EN_SHIFT 0
+#define PCI_NRTR_RGLTR_WR_EN_MASK 0x1
+#define PCI_NRTR_RGLTR_RD_EN_SHIFT 4
+#define PCI_NRTR_RGLTR_RD_EN_MASK 0x10
+
+/* PCI_NRTR_RGLTR_WR_RESULT */
+#define PCI_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define PCI_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* PCI_NRTR_RGLTR_RD_RESULT */
+#define PCI_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define PCI_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* PCI_NRTR_SCRAMB_EN */
+#define PCI_NRTR_SCRAMB_EN_VAL_SHIFT 0
+#define PCI_NRTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* PCI_NRTR_NON_LIN_SCRAMB */
+#define PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
new file mode 100644
index 000000000000..447e5d4e7dc8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCI_NRTR_REGS_H_
+#define ASIC_REG_PCI_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * PCI_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmPCI_NRTR_HBW_MAX_CRED 0x100
+
+#define mmPCI_NRTR_LBW_MAX_CRED 0x120
+
+#define mmPCI_NRTR_DBG_E_ARB 0x300
+
+#define mmPCI_NRTR_DBG_W_ARB 0x304
+
+#define mmPCI_NRTR_DBG_N_ARB 0x308
+
+#define mmPCI_NRTR_DBG_S_ARB 0x30C
+
+#define mmPCI_NRTR_DBG_L_ARB 0x310
+
+#define mmPCI_NRTR_DBG_E_ARB_MAX 0x320
+
+#define mmPCI_NRTR_DBG_W_ARB_MAX 0x324
+
+#define mmPCI_NRTR_DBG_N_ARB_MAX 0x328
+
+#define mmPCI_NRTR_DBG_S_ARB_MAX 0x32C
+
+#define mmPCI_NRTR_DBG_L_ARB_MAX 0x330
+
+#define mmPCI_NRTR_SPLIT_COEF_0 0x400
+
+#define mmPCI_NRTR_SPLIT_COEF_1 0x404
+
+#define mmPCI_NRTR_SPLIT_COEF_2 0x408
+
+#define mmPCI_NRTR_SPLIT_COEF_3 0x40C
+
+#define mmPCI_NRTR_SPLIT_COEF_4 0x410
+
+#define mmPCI_NRTR_SPLIT_COEF_5 0x414
+
+#define mmPCI_NRTR_SPLIT_COEF_6 0x418
+
+#define mmPCI_NRTR_SPLIT_COEF_7 0x41C
+
+#define mmPCI_NRTR_SPLIT_COEF_8 0x420
+
+#define mmPCI_NRTR_SPLIT_COEF_9 0x424
+
+#define mmPCI_NRTR_SPLIT_CFG 0x440
+
+#define mmPCI_NRTR_SPLIT_RD_SAT 0x444
+
+#define mmPCI_NRTR_SPLIT_RD_RST_TOKEN 0x448
+
+#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_0 0x44C
+
+#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_1 0x450
+
+#define mmPCI_NRTR_SPLIT_WR_SAT 0x454
+
+#define mmPCI_NRTR_WPLIT_WR_TST_TOLEN 0x458
+
+#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_0 0x45C
+
+#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_1 0x460
+
+#define mmPCI_NRTR_HBW_RANGE_HIT 0x470
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_0 0x480
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_1 0x484
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_2 0x488
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_3 0x48C
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_4 0x490
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_5 0x494
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_6 0x498
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_7 0x49C
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_0 0x4A0
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_1 0x4A4
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_2 0x4A8
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_3 0x4AC
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_4 0x4B0
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_5 0x4B4
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_6 0x4B8
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_7 0x4BC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_0 0x4C0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_1 0x4C4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_2 0x4C8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_3 0x4CC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_4 0x4D0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_5 0x4D4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_6 0x4D8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_7 0x4DC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_0 0x4E0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_1 0x4E4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_2 0x4E8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_3 0x4EC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_4 0x4F0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_5 0x4F4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_6 0x4F8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_7 0x4FC
+
+#define mmPCI_NRTR_LBW_RANGE_HIT 0x500
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_0 0x510
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_1 0x514
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_2 0x518
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_3 0x51C
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_4 0x520
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_5 0x524
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_6 0x528
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_7 0x52C
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_8 0x530
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_9 0x534
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_10 0x538
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_11 0x53C
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_12 0x540
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_13 0x544
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_14 0x548
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_15 0x54C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_0 0x550
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_1 0x554
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_2 0x558
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_3 0x55C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_4 0x560
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_5 0x564
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_6 0x568
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_7 0x56C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_8 0x570
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_9 0x574
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_10 0x578
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_11 0x57C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_12 0x580
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_13 0x584
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_14 0x588
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_15 0x58C
+
+#define mmPCI_NRTR_RGLTR 0x590
+
+#define mmPCI_NRTR_RGLTR_WR_RESULT 0x594
+
+#define mmPCI_NRTR_RGLTR_RD_RESULT 0x598
+
+#define mmPCI_NRTR_SCRAMB_EN 0x600
+
+#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604
+
+#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
new file mode 100644
index 000000000000..daaf5d9079dc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_AUX_REGS_H_
+#define ASIC_REG_PCIE_AUX_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_AUX (Prototype: PCIE_AUX)
+ *****************************************
+ */
+
+#define mmPCIE_AUX_APB_TIMEOUT 0xC07004
+
+#define mmPCIE_AUX_PHY_INIT 0xC07100
+
+#define mmPCIE_AUX_LTR_MAX_LATENCY 0xC07138
+
+#define mmPCIE_AUX_BAR0_START_L 0xC07160
+
+#define mmPCIE_AUX_BAR0_START_H 0xC07164
+
+#define mmPCIE_AUX_BAR1_START 0xC07168
+
+#define mmPCIE_AUX_BAR2_START_L 0xC0716C
+
+#define mmPCIE_AUX_BAR2_START_H 0xC07170
+
+#define mmPCIE_AUX_BAR3_START 0xC07174
+
+#define mmPCIE_AUX_BAR4_START_L 0xC07178
+
+#define mmPCIE_AUX_BAR4_START_H 0xC0717C
+
+#define mmPCIE_AUX_BAR5_START 0xC07180
+
+#define mmPCIE_AUX_BAR0_LIMIT_L 0xC07184
+
+#define mmPCIE_AUX_BAR0_LIMIT_H 0xC07188
+
+#define mmPCIE_AUX_BAR1_LIMIT 0xC0718C
+
+#define mmPCIE_AUX_BAR2_LIMIT_L 0xC07190
+
+#define mmPCIE_AUX_BAR2_LIMIT_H 0xC07194
+
+#define mmPCIE_AUX_BAR3_LIMIT 0xC07198
+
+#define mmPCIE_AUX_BAR4_LIMIT_L 0xC0719C
+
+#define mmPCIE_AUX_BAR4_LIMIT_H 0xC07200
+
+#define mmPCIE_AUX_BAR5_LIMIT 0xC07204
+
+#define mmPCIE_AUX_BUS_MASTER_EN 0xC07208
+
+#define mmPCIE_AUX_MEM_SPACE_EN 0xC0720C
+
+#define mmPCIE_AUX_MAX_RD_REQ_SIZE 0xC07210
+
+#define mmPCIE_AUX_MAX_PAYLOAD_SIZE 0xC07214
+
+#define mmPCIE_AUX_EXT_TAG_EN 0xC07218
+
+#define mmPCIE_AUX_RCB 0xC0721C
+
+#define mmPCIE_AUX_PM_NO_SOFT_RST 0xC07220
+
+#define mmPCIE_AUX_PBUS_NUM 0xC07224
+
+#define mmPCIE_AUX_PBUS_DEV_NUM 0xC07228
+
+#define mmPCIE_AUX_NO_SNOOP_EN 0xC0722C
+
+#define mmPCIE_AUX_RELAX_ORDER_EN 0xC07230
+
+#define mmPCIE_AUX_HP_SLOT_CTRL_ACCESS 0xC07234
+
+#define mmPCIE_AUX_DLL_STATE_CHGED_EN 0xC07238
+
+#define mmPCIE_AUX_CMP_CPLED_INT_EN 0xC0723C
+
+#define mmPCIE_AUX_HP_INT_EN 0xC07340
+
+#define mmPCIE_AUX_PRE_DET_CHGEN_EN 0xC07344
+
+#define mmPCIE_AUX_MRL_SENSOR_CHGED_EN 0xC07348
+
+#define mmPCIE_AUX_PWR_FAULT_DET_EN 0xC0734C
+
+#define mmPCIE_AUX_ATTEN_BUTTON_PRESSED_EN 0xC07350
+
+#define mmPCIE_AUX_PF_FLR_ACTIVE 0xC07360
+
+#define mmPCIE_AUX_PF_FLR_DONE 0xC07364
+
+#define mmPCIE_AUX_FLR_INT 0xC07390
+
+#define mmPCIE_AUX_LTR_M_EN 0xC073B0
+
+#define mmPCIE_AUX_LTSSM_EN 0xC07428
+
+#define mmPCIE_AUX_SYS_INTR 0xC07440
+
+#define mmPCIE_AUX_INT_DISABLE 0xC07444
+
+#define mmPCIE_AUX_SMLH_LINK_UP 0xC07448
+
+#define mmPCIE_AUX_PM_CURR_STATE 0xC07450
+
+#define mmPCIE_AUX_RDLH_LINK_UP 0xC07458
+
+#define mmPCIE_AUX_BRDG_SLV_XFER_PENDING 0xC0745C
+
+#define mmPCIE_AUX_BRDG_DBI_XFER_PENDING 0xC07460
+
+#define mmPCIE_AUX_AUTO_SP_DIS 0xC07478
+
+#define mmPCIE_AUX_DBI 0xC07490
+
+#define mmPCIE_AUX_DBI_32 0xC07494
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_0 0xC074A4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_1 0xC074A8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_2 0xC074AC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_3 0xC074B0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_4 0xC074B4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_5 0xC074B8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_6 0xC074BC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_7 0xC074C0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_8 0xC074C4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_9 0xC074C8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_10 0xC074CC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_11 0xC074D0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_12 0xC074D4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_13 0xC074D8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_14 0xC074DC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_15 0xC074E0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_16 0xC074E4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_17 0xC074E8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_18 0xC074EC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_19 0xC074F0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_20 0xC074F4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_21 0xC074F8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_22 0xC074FC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_23 0xC07500
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_24 0xC07504
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_25 0xC07508
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_26 0xC0750C
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_27 0xC07510
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_28 0xC07514
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_0 0xC07640
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_1 0xC07644
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_2 0xC07648
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_3 0xC0764C
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_4 0xC07650
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_5 0xC07654
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_6 0xC07658
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_7 0xC0765C
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_0 0xC07744
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_1 0xC07748
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_2 0xC0774C
+
+#define mmPCIE_AUX_APP_RAS_DES_TBA_CTRL 0xC07774
+
+#define mmPCIE_AUX_PM_DSTATE 0xC07840
+
+#define mmPCIE_AUX_PM_PME_EN 0xC07844
+
+#define mmPCIE_AUX_PM_LINKST_IN_L0S 0xC07848
+
+#define mmPCIE_AUX_PM_LINKST_IN_L1 0xC0784C
+
+#define mmPCIE_AUX_PM_LINKST_IN_L2 0xC07850
+
+#define mmPCIE_AUX_PM_LINKST_L2_EXIT 0xC07854
+
+#define mmPCIE_AUX_PM_STATUS 0xC07858
+
+#define mmPCIE_AUX_APP_READY_ENTER_L23 0xC0785C
+
+#define mmPCIE_AUX_APP_XFER_PENDING 0xC07860
+
+#define mmPCIE_AUX_APP_REQ_L1 0xC07930
+
+#define mmPCIE_AUX_AUX_PM_EN 0xC07934
+
+#define mmPCIE_AUX_APPS_PM_XMT_PME 0xC07938
+
+#define mmPCIE_AUX_OUTBAND_PWRUP_CMD 0xC07940
+
+#define mmPCIE_AUX_PERST 0xC079B8
+
+#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
new file mode 100644
index 000000000000..8eda4de58788
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_EMMC_PLL_REGS_H_
+#define ASIC_REG_PSOC_EMMC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_EMMC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_EMMC_PLL_NR 0xC70100
+
+#define mmPSOC_EMMC_PLL_NF 0xC70104
+
+#define mmPSOC_EMMC_PLL_OD 0xC70108
+
+#define mmPSOC_EMMC_PLL_NB 0xC7010C
+
+#define mmPSOC_EMMC_PLL_CFG 0xC70110
+
+#define mmPSOC_EMMC_PLL_LOSE_MASK 0xC70120
+
+#define mmPSOC_EMMC_PLL_LOCK_INTR 0xC70128
+
+#define mmPSOC_EMMC_PLL_LOCK_BYPASS 0xC7012C
+
+#define mmPSOC_EMMC_PLL_DATA_CHNG 0xC70130
+
+#define mmPSOC_EMMC_PLL_RST 0xC70134
+
+#define mmPSOC_EMMC_PLL_SLIP_WD_CNTR 0xC70150
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_0 0xC70200
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_1 0xC70204
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_2 0xC70208
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_3 0xC7020C
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_0 0xC70220
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_1 0xC70224
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_2 0xC70228
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_3 0xC7022C
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_0 0xC70280
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_1 0xC70284
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_2 0xC70288
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_3 0xC7028C
+
+#define mmPSOC_EMMC_PLL_DIV_EN_0 0xC702A0
+
+#define mmPSOC_EMMC_PLL_DIV_EN_1 0xC702A4
+
+#define mmPSOC_EMMC_PLL_DIV_EN_2 0xC702A8
+
+#define mmPSOC_EMMC_PLL_DIV_EN_3 0xC702AC
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_0 0xC702C0
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_1 0xC702C4
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_2 0xC702C8
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_3 0xC702CC
+
+#define mmPSOC_EMMC_PLL_CLK_GATER 0xC70300
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_0 0xC70310
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_1 0xC70314
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_2 0xC70318
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_3 0xC7031C
+
+#define mmPSOC_EMMC_PLL_REF_CNTR_PERIOD 0xC70400
+
+#define mmPSOC_EMMC_PLL_REF_LOW_THRESHOLD 0xC70410
+
+#define mmPSOC_EMMC_PLL_REF_HIGH_THRESHOLD 0xC70420
+
+#define mmPSOC_EMMC_PLL_PLL_NOT_STABLE 0xC70430
+
+#define mmPSOC_EMMC_PLL_FREQ_CALC_EN 0xC70440
+
+#endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000000..d4bf0e1db4df
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,447 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BTM_FSM */
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN */
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_EN */
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100
+
+/* PSOC_GLOBAL_CONF_SCRATCHPAD */
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SEMAPHORE */
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_WARM_REBOOT */
+#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_UBOOT_MAGIC */
+#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPL_SOURCE */
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000
+
+/* PSOC_GLOBAL_CONF_I2C_SLV */
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_APP_STATUS */
+#define PSOC_GLOBAL_CONF_APP_STATUS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_APP_STATUS_IND_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_STS */
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00
+
+/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80
+
+/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TARGETID */
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 12
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFFF000
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000
+
+/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MII_ADDR */
+#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_MII_SPEED */
+#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0xFE0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_SHIFT 12
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_MASK 0x3000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_SHIFT 14
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_MASK 0x1FC000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_SHIFT 21
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK 0x200000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_SHIFT 22
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_MASK 0x1C00000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_SHIFT 25
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_MASK 0x2000000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_SHIFT 26
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_MASK 0x1C000000
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MASK_REQ */
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG */
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_WD_RST_CFG */
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_MNL_RST_CFG */
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_PRSTN_MASK */
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_MASK */
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RST_SRC */
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_BNK3V3_MS */
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_PAD_DEFAULT */
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_SEL */
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
new file mode 100644
index 000000000000..cfbdd2c9c5c7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
@@ -0,0 +1,745 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C
+
+#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024
+
+#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028
+
+#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030
+
+#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038
+
+#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040
+
+#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044
+
+#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048
+
+#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C
+
+#define mmPSOC_GLOBAL_CONF_WARM_REBOOT 0xC4B300
+
+#define mmPSOC_GLOBAL_CONF_UBOOT_MAGIC 0xC4B304
+
+#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308
+
+#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314
+
+#define mmPSOC_GLOBAL_CONF_APP_STATUS 0xC4B320
+
+#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340
+
+#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350
+
+#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354
+
+#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358
+
+#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C
+
+#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360
+
+#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400
+
+#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420
+
+#define mmPSOC_GLOBAL_CONF_MII_ADDR 0xC4B424
+
+#define mmPSOC_GLOBAL_CONF_MII_SPEED 0xC4B428
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454
+
+#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458
+
+#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG 0xC4B470
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG 0xC4B474
+
+#define mmPSOC_GLOBAL_CONF_WD_RST_CFG 0xC4B478
+
+#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG 0xC4B47C
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B480
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B484
+
+#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B488
+
+#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B490
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B640
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B644
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B648
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B64C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B650
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B654
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B658
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B65C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B660
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B664
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B668
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B66C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B680
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B684
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B688
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B68C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B690
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B694
+
+#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B6E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B700
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B704
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B708
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B70C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B710
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B714
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B718
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B71C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B720
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B724
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B728
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B72C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B730
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B734
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B738
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B73C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B740
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B744
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B748
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B74C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B750
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B754
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B758
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B75C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B760
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B764
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B768
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B76C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B770
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B774
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B778
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B77C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B780
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B784
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B788
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B78C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B790
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B794
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B798
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B79C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B7A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B7A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B7A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B7AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B7B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B7B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B7B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B7BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B7C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B7C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B7C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B7CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B7D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B7D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B7D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B7DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B7E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B7E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B7E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B7EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B7F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B7F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B7F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B7FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B800
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B804
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B808
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B80C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B810
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B814
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B818
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B81C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B820
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B824
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B828
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B82C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B830
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B834
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B838
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B83C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B840
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B844
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4B900
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4B904
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4B908
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4B90C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4B910
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4B914
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4B918
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4B91C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4B920
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4B924
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4B928
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4B92C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4B930
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4B934
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4B938
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4B93C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4B940
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4B944
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4B948
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4B94C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4B950
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4B954
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4B958
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4B95C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4B960
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4B964
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4B968
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4B96C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4B970
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4B974
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4B978
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4B97C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4B980
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4B984
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4B988
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4B98C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4B990
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4B994
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4B998
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4B99C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4B9A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4B9A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4B9A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4B9AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4B9B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4B9B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4B9B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4B9BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4B9C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4B9C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0xC4B9C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4B9CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4B9D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4B9D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4B9D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4B9DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4B9E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4B9E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4B9E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4B9EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4B9F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4B9F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4B9F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4B9FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BA00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BA04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BA08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BA0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BA10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BA14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BA18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BA1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BA20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BA24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BA28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BA2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BA30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BA34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BA38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BA3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BA40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BA44
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
new file mode 100644
index 000000000000..6723d8f76f30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_MME_PLL_REGS_H_
+#define ASIC_REG_PSOC_MME_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_MME_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_MME_PLL_NR 0xC71100
+
+#define mmPSOC_MME_PLL_NF 0xC71104
+
+#define mmPSOC_MME_PLL_OD 0xC71108
+
+#define mmPSOC_MME_PLL_NB 0xC7110C
+
+#define mmPSOC_MME_PLL_CFG 0xC71110
+
+#define mmPSOC_MME_PLL_LOSE_MASK 0xC71120
+
+#define mmPSOC_MME_PLL_LOCK_INTR 0xC71128
+
+#define mmPSOC_MME_PLL_LOCK_BYPASS 0xC7112C
+
+#define mmPSOC_MME_PLL_DATA_CHNG 0xC71130
+
+#define mmPSOC_MME_PLL_RST 0xC71134
+
+#define mmPSOC_MME_PLL_SLIP_WD_CNTR 0xC71150
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_0 0xC71200
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_1 0xC71204
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_2 0xC71208
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_3 0xC7120C
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_0 0xC71220
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_1 0xC71224
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_2 0xC71228
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_3 0xC7122C
+
+#define mmPSOC_MME_PLL_DIV_SEL_0 0xC71280
+
+#define mmPSOC_MME_PLL_DIV_SEL_1 0xC71284
+
+#define mmPSOC_MME_PLL_DIV_SEL_2 0xC71288
+
+#define mmPSOC_MME_PLL_DIV_SEL_3 0xC7128C
+
+#define mmPSOC_MME_PLL_DIV_EN_0 0xC712A0
+
+#define mmPSOC_MME_PLL_DIV_EN_1 0xC712A4
+
+#define mmPSOC_MME_PLL_DIV_EN_2 0xC712A8
+
+#define mmPSOC_MME_PLL_DIV_EN_3 0xC712AC
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_0 0xC712C0
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_1 0xC712C4
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_2 0xC712C8
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_3 0xC712CC
+
+#define mmPSOC_MME_PLL_CLK_GATER 0xC71300
+
+#define mmPSOC_MME_PLL_CLK_RLX_0 0xC71310
+
+#define mmPSOC_MME_PLL_CLK_RLX_1 0xC71314
+
+#define mmPSOC_MME_PLL_CLK_RLX_2 0xC71318
+
+#define mmPSOC_MME_PLL_CLK_RLX_3 0xC7131C
+
+#define mmPSOC_MME_PLL_REF_CNTR_PERIOD 0xC71400
+
+#define mmPSOC_MME_PLL_REF_LOW_THRESHOLD 0xC71410
+
+#define mmPSOC_MME_PLL_REF_HIGH_THRESHOLD 0xC71420
+
+#define mmPSOC_MME_PLL_PLL_NOT_STABLE 0xC71430
+
+#define mmPSOC_MME_PLL_FREQ_CALC_EN 0xC71440
+
+#endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
new file mode 100644
index 000000000000..abcded0531c9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_
+#define ASIC_REG_PSOC_PCI_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_PCI_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_PCI_PLL_NR 0xC72100
+
+#define mmPSOC_PCI_PLL_NF 0xC72104
+
+#define mmPSOC_PCI_PLL_OD 0xC72108
+
+#define mmPSOC_PCI_PLL_NB 0xC7210C
+
+#define mmPSOC_PCI_PLL_CFG 0xC72110
+
+#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120
+
+#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128
+
+#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C
+
+#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130
+
+#define mmPSOC_PCI_PLL_RST 0xC72134
+
+#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C
+
+#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280
+
+#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284
+
+#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288
+
+#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C
+
+#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0
+
+#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4
+
+#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8
+
+#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC
+
+#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300
+
+#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310
+
+#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314
+
+#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318
+
+#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C
+
+#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400
+
+#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410
+
+#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420
+
+#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430
+
+#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
+
+#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
new file mode 100644
index 000000000000..5925c7477c25
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_SPI_REGS_H_
+#define ASIC_REG_PSOC_SPI_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_SPI (Prototype: SPI)
+ *****************************************
+ */
+
+#define mmPSOC_SPI_CTRLR0 0xC43000
+
+#define mmPSOC_SPI_CTRLR1 0xC43004
+
+#define mmPSOC_SPI_SSIENR 0xC43008
+
+#define mmPSOC_SPI_MWCR 0xC4300C
+
+#define mmPSOC_SPI_SER 0xC43010
+
+#define mmPSOC_SPI_BAUDR 0xC43014
+
+#define mmPSOC_SPI_TXFTLR 0xC43018
+
+#define mmPSOC_SPI_RXFTLR 0xC4301C
+
+#define mmPSOC_SPI_TXFLR 0xC43020
+
+#define mmPSOC_SPI_RXFLR 0xC43024
+
+#define mmPSOC_SPI_SR 0xC43028
+
+#define mmPSOC_SPI_IMR 0xC4302C
+
+#define mmPSOC_SPI_ISR 0xC43030
+
+#define mmPSOC_SPI_RISR 0xC43034
+
+#define mmPSOC_SPI_TXOICR 0xC43038
+
+#define mmPSOC_SPI_RXOICR 0xC4303C
+
+#define mmPSOC_SPI_RXUICR 0xC43040
+
+#define mmPSOC_SPI_MSTICR 0xC43044
+
+#define mmPSOC_SPI_ICR 0xC43048
+
+#define mmPSOC_SPI_IDR 0xC43058
+
+#define mmPSOC_SPI_SSI_VERSION_ID 0xC4305C
+
+#define mmPSOC_SPI_DR0 0xC43060
+
+#define mmPSOC_SPI_DR1 0xC43064
+
+#define mmPSOC_SPI_DR2 0xC43068
+
+#define mmPSOC_SPI_DR3 0xC4306C
+
+#define mmPSOC_SPI_DR4 0xC43070
+
+#define mmPSOC_SPI_DR5 0xC43074
+
+#define mmPSOC_SPI_DR6 0xC43078
+
+#define mmPSOC_SPI_DR7 0xC4307C
+
+#define mmPSOC_SPI_DR8 0xC43080
+
+#define mmPSOC_SPI_DR9 0xC43084
+
+#define mmPSOC_SPI_DR10 0xC43088
+
+#define mmPSOC_SPI_DR11 0xC4308C
+
+#define mmPSOC_SPI_DR12 0xC43090
+
+#define mmPSOC_SPI_DR13 0xC43094
+
+#define mmPSOC_SPI_DR14 0xC43098
+
+#define mmPSOC_SPI_DR15 0xC4309C
+
+#define mmPSOC_SPI_DR16 0xC430A0
+
+#define mmPSOC_SPI_DR17 0xC430A4
+
+#define mmPSOC_SPI_DR18 0xC430A8
+
+#define mmPSOC_SPI_DR19 0xC430AC
+
+#define mmPSOC_SPI_DR20 0xC430B0
+
+#define mmPSOC_SPI_DR21 0xC430B4
+
+#define mmPSOC_SPI_DR22 0xC430B8
+
+#define mmPSOC_SPI_DR23 0xC430BC
+
+#define mmPSOC_SPI_DR24 0xC430C0
+
+#define mmPSOC_SPI_DR25 0xC430C4
+
+#define mmPSOC_SPI_DR26 0xC430C8
+
+#define mmPSOC_SPI_DR27 0xC430CC
+
+#define mmPSOC_SPI_DR28 0xC430D0
+
+#define mmPSOC_SPI_DR29 0xC430D4
+
+#define mmPSOC_SPI_DR30 0xC430D8
+
+#define mmPSOC_SPI_DR31 0xC430DC
+
+#define mmPSOC_SPI_DR32 0xC430E0
+
+#define mmPSOC_SPI_DR33 0xC430E4
+
+#define mmPSOC_SPI_DR34 0xC430E8
+
+#define mmPSOC_SPI_DR35 0xC430EC
+
+#define mmPSOC_SPI_RX_SAMPLE_DLY 0xC430F0
+
+#define mmPSOC_SPI_RSVD_1 0xC430F8
+
+#define mmPSOC_SPI_RSVD_2 0xC430FC
+
+#endif /* ASIC_REG_PSOC_SPI_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
new file mode 100644
index 000000000000..d56c9fa0e7ba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X0_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_E_ARB 0x201100
+
+#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_W_ARB 0x201104
+
+#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB 0x201110
+
+#define mmSRAM_Y0_X0_RTR_HBW_E_ARB_MAX 0x201120
+
+#define mmSRAM_Y0_X0_RTR_HBW_W_ARB_MAX 0x201124
+
+#define mmSRAM_Y0_X0_RTR_HBW_L_ARB_MAX 0x201130
+
+#define mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB 0x201140
+
+#define mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB 0x201144
+
+#define mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB 0x201148
+
+#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB 0x201160
+
+#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB 0x201164
+
+#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_L_ARB 0x201168
+
+#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_E_ARB 0x201200
+
+#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_W_ARB 0x201204
+
+#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_L_ARB 0x201210
+
+#define mmSRAM_Y0_X0_RTR_LBW_E_ARB_MAX 0x201220
+
+#define mmSRAM_Y0_X0_RTR_LBW_W_ARB_MAX 0x201224
+
+#define mmSRAM_Y0_X0_RTR_LBW_L_ARB_MAX 0x201230
+
+#define mmSRAM_Y0_X0_RTR_LBW_DATA_E_ARB 0x201240
+
+#define mmSRAM_Y0_X0_RTR_LBW_DATA_W_ARB 0x201244
+
+#define mmSRAM_Y0_X0_RTR_LBW_DATA_L_ARB 0x201248
+
+#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_E_ARB 0x201260
+
+#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_W_ARB 0x201264
+
+#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_L_ARB 0x201268
+
+#define mmSRAM_Y0_X0_RTR_DBG_E_ARB 0x201300
+
+#define mmSRAM_Y0_X0_RTR_DBG_W_ARB 0x201304
+
+#define mmSRAM_Y0_X0_RTR_DBG_L_ARB 0x201310
+
+#define mmSRAM_Y0_X0_RTR_DBG_E_ARB_MAX 0x201320
+
+#define mmSRAM_Y0_X0_RTR_DBG_W_ARB_MAX 0x201324
+
+#define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX 0x201330
+
+#endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
new file mode 100644
index 000000000000..5624544303ca
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X1_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_E_ARB 0x205100
+
+#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_W_ARB 0x205104
+
+#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB 0x205110
+
+#define mmSRAM_Y0_X1_RTR_HBW_E_ARB_MAX 0x205120
+
+#define mmSRAM_Y0_X1_RTR_HBW_W_ARB_MAX 0x205124
+
+#define mmSRAM_Y0_X1_RTR_HBW_L_ARB_MAX 0x205130
+
+#define mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB 0x205140
+
+#define mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB 0x205144
+
+#define mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB 0x205148
+
+#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB 0x205160
+
+#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB 0x205164
+
+#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_L_ARB 0x205168
+
+#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_E_ARB 0x205200
+
+#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_W_ARB 0x205204
+
+#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_L_ARB 0x205210
+
+#define mmSRAM_Y0_X1_RTR_LBW_E_ARB_MAX 0x205220
+
+#define mmSRAM_Y0_X1_RTR_LBW_W_ARB_MAX 0x205224
+
+#define mmSRAM_Y0_X1_RTR_LBW_L_ARB_MAX 0x205230
+
+#define mmSRAM_Y0_X1_RTR_LBW_DATA_E_ARB 0x205240
+
+#define mmSRAM_Y0_X1_RTR_LBW_DATA_W_ARB 0x205244
+
+#define mmSRAM_Y0_X1_RTR_LBW_DATA_L_ARB 0x205248
+
+#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_E_ARB 0x205260
+
+#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_W_ARB 0x205264
+
+#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_L_ARB 0x205268
+
+#define mmSRAM_Y0_X1_RTR_DBG_E_ARB 0x205300
+
+#define mmSRAM_Y0_X1_RTR_DBG_W_ARB 0x205304
+
+#define mmSRAM_Y0_X1_RTR_DBG_L_ARB 0x205310
+
+#define mmSRAM_Y0_X1_RTR_DBG_E_ARB_MAX 0x205320
+
+#define mmSRAM_Y0_X1_RTR_DBG_W_ARB_MAX 0x205324
+
+#define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX 0x205330
+
+#endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
new file mode 100644
index 000000000000..3322bc0bd1df
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X2_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_E_ARB 0x209100
+
+#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_W_ARB 0x209104
+
+#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB 0x209110
+
+#define mmSRAM_Y0_X2_RTR_HBW_E_ARB_MAX 0x209120
+
+#define mmSRAM_Y0_X2_RTR_HBW_W_ARB_MAX 0x209124
+
+#define mmSRAM_Y0_X2_RTR_HBW_L_ARB_MAX 0x209130
+
+#define mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB 0x209140
+
+#define mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB 0x209144
+
+#define mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB 0x209148
+
+#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB 0x209160
+
+#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB 0x209164
+
+#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_L_ARB 0x209168
+
+#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_E_ARB 0x209200
+
+#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_W_ARB 0x209204
+
+#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_L_ARB 0x209210
+
+#define mmSRAM_Y0_X2_RTR_LBW_E_ARB_MAX 0x209220
+
+#define mmSRAM_Y0_X2_RTR_LBW_W_ARB_MAX 0x209224
+
+#define mmSRAM_Y0_X2_RTR_LBW_L_ARB_MAX 0x209230
+
+#define mmSRAM_Y0_X2_RTR_LBW_DATA_E_ARB 0x209240
+
+#define mmSRAM_Y0_X2_RTR_LBW_DATA_W_ARB 0x209244
+
+#define mmSRAM_Y0_X2_RTR_LBW_DATA_L_ARB 0x209248
+
+#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_E_ARB 0x209260
+
+#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_W_ARB 0x209264
+
+#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_L_ARB 0x209268
+
+#define mmSRAM_Y0_X2_RTR_DBG_E_ARB 0x209300
+
+#define mmSRAM_Y0_X2_RTR_DBG_W_ARB 0x209304
+
+#define mmSRAM_Y0_X2_RTR_DBG_L_ARB 0x209310
+
+#define mmSRAM_Y0_X2_RTR_DBG_E_ARB_MAX 0x209320
+
+#define mmSRAM_Y0_X2_RTR_DBG_W_ARB_MAX 0x209324
+
+#define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX 0x209330
+
+#endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
new file mode 100644
index 000000000000..81e393db2027
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X3_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_E_ARB 0x20D100
+
+#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_W_ARB 0x20D104
+
+#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB 0x20D110
+
+#define mmSRAM_Y0_X3_RTR_HBW_E_ARB_MAX 0x20D120
+
+#define mmSRAM_Y0_X3_RTR_HBW_W_ARB_MAX 0x20D124
+
+#define mmSRAM_Y0_X3_RTR_HBW_L_ARB_MAX 0x20D130
+
+#define mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB 0x20D140
+
+#define mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB 0x20D144
+
+#define mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB 0x20D148
+
+#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB 0x20D160
+
+#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB 0x20D164
+
+#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_L_ARB 0x20D168
+
+#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_E_ARB 0x20D200
+
+#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_W_ARB 0x20D204
+
+#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_L_ARB 0x20D210
+
+#define mmSRAM_Y0_X3_RTR_LBW_E_ARB_MAX 0x20D220
+
+#define mmSRAM_Y0_X3_RTR_LBW_W_ARB_MAX 0x20D224
+
+#define mmSRAM_Y0_X3_RTR_LBW_L_ARB_MAX 0x20D230
+
+#define mmSRAM_Y0_X3_RTR_LBW_DATA_E_ARB 0x20D240
+
+#define mmSRAM_Y0_X3_RTR_LBW_DATA_W_ARB 0x20D244
+
+#define mmSRAM_Y0_X3_RTR_LBW_DATA_L_ARB 0x20D248
+
+#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_E_ARB 0x20D260
+
+#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_W_ARB 0x20D264
+
+#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_L_ARB 0x20D268
+
+#define mmSRAM_Y0_X3_RTR_DBG_E_ARB 0x20D300
+
+#define mmSRAM_Y0_X3_RTR_DBG_W_ARB 0x20D304
+
+#define mmSRAM_Y0_X3_RTR_DBG_L_ARB 0x20D310
+
+#define mmSRAM_Y0_X3_RTR_DBG_E_ARB_MAX 0x20D320
+
+#define mmSRAM_Y0_X3_RTR_DBG_W_ARB_MAX 0x20D324
+
+#define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX 0x20D330
+
+#endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
new file mode 100644
index 000000000000..b2e11b1de385
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X4_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_E_ARB 0x211100
+
+#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_W_ARB 0x211104
+
+#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB 0x211110
+
+#define mmSRAM_Y0_X4_RTR_HBW_E_ARB_MAX 0x211120
+
+#define mmSRAM_Y0_X4_RTR_HBW_W_ARB_MAX 0x211124
+
+#define mmSRAM_Y0_X4_RTR_HBW_L_ARB_MAX 0x211130
+
+#define mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB 0x211140
+
+#define mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB 0x211144
+
+#define mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB 0x211148
+
+#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB 0x211160
+
+#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB 0x211164
+
+#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_L_ARB 0x211168
+
+#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_E_ARB 0x211200
+
+#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_W_ARB 0x211204
+
+#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_L_ARB 0x211210
+
+#define mmSRAM_Y0_X4_RTR_LBW_E_ARB_MAX 0x211220
+
+#define mmSRAM_Y0_X4_RTR_LBW_W_ARB_MAX 0x211224
+
+#define mmSRAM_Y0_X4_RTR_LBW_L_ARB_MAX 0x211230
+
+#define mmSRAM_Y0_X4_RTR_LBW_DATA_E_ARB 0x211240
+
+#define mmSRAM_Y0_X4_RTR_LBW_DATA_W_ARB 0x211244
+
+#define mmSRAM_Y0_X4_RTR_LBW_DATA_L_ARB 0x211248
+
+#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_E_ARB 0x211260
+
+#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_W_ARB 0x211264
+
+#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_L_ARB 0x211268
+
+#define mmSRAM_Y0_X4_RTR_DBG_E_ARB 0x211300
+
+#define mmSRAM_Y0_X4_RTR_DBG_W_ARB 0x211304
+
+#define mmSRAM_Y0_X4_RTR_DBG_L_ARB 0x211310
+
+#define mmSRAM_Y0_X4_RTR_DBG_E_ARB_MAX 0x211320
+
+#define mmSRAM_Y0_X4_RTR_DBG_W_ARB_MAX 0x211324
+
+#define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX 0x211330
+
+#endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
new file mode 100644
index 000000000000..b4ea8cae2757
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_STLB_MASKS_H_
+#define ASIC_REG_STLB_MASKS_H_
+
+/*
+ *****************************************
+ * STLB (Prototype: STLB)
+ *****************************************
+ */
+
+/* STLB_CACHE_INV */
+#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
+#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
+#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8
+#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
+
+/* STLB_CACHE_INV_BASE_39_8 */
+#define STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0
+#define STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF
+
+/* STLB_CACHE_INV_BASE_49_40 */
+#define STLB_CACHE_INV_BASE_49_40_PA_SHIFT 0
+#define STLB_CACHE_INV_BASE_49_40_PA_MASK 0x3FF
+
+/* STLB_STLB_FEATURE_EN */
+#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0
+#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1
+#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1
+#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2
+#define STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2
+#define STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4
+#define STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3
+#define STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8
+#define STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4
+#define STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10
+#define STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5
+#define STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20
+#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6
+#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40
+#define STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7
+#define STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0xF80
+
+/* STLB_STLB_AXI_CACHE */
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0
+#define STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8
+#define STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00
+
+/* STLB_HOP_CONFIGURATION */
+#define STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0
+#define STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7
+#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SHIFT 4
+#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_MASK 0x70
+#define STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 8
+#define STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x700
+
+/* STLB_LINK_LIST_LOOKUP_MASK_49_32 */
+#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_SHIFT 0
+#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_MASK 0x3FFFF
+
+/* STLB_LINK_LIST_LOOKUP_MASK_31_0 */
+#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0
+#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF
+
+/* STLB_LINK_LIST */
+#define STLB_LINK_LIST_CLEAR_SHIFT 0
+#define STLB_LINK_LIST_CLEAR_MASK 0x1
+#define STLB_LINK_LIST_EN_SHIFT 1
+#define STLB_LINK_LIST_EN_MASK 0x2
+
+/* STLB_INV_ALL_START */
+#define STLB_INV_ALL_START_R_SHIFT 0
+#define STLB_INV_ALL_START_R_MASK 0x1
+
+/* STLB_INV_ALL_SET */
+#define STLB_INV_ALL_SET_R_SHIFT 0
+#define STLB_INV_ALL_SET_R_MASK 0xFF
+
+/* STLB_INV_PS */
+#define STLB_INV_PS_R_SHIFT 0
+#define STLB_INV_PS_R_MASK 0x3
+
+/* STLB_INV_CONSUMER_INDEX */
+#define STLB_INV_CONSUMER_INDEX_R_SHIFT 0
+#define STLB_INV_CONSUMER_INDEX_R_MASK 0xFF
+
+/* STLB_INV_HIT_COUNT */
+#define STLB_INV_HIT_COUNT_R_SHIFT 0
+#define STLB_INV_HIT_COUNT_R_MASK 0x7FF
+
+/* STLB_INV_SET */
+#define STLB_INV_SET_R_SHIFT 0
+#define STLB_INV_SET_R_MASK 0xFF
+
+/* STLB_SRAM_INIT */
+#define STLB_SRAM_INIT_BUSY_TAG_SHIFT 0
+#define STLB_SRAM_INIT_BUSY_TAG_MASK 0x3
+#define STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2
+#define STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC
+#define STLB_SRAM_INIT_BUSY_DATA_SHIFT 4
+#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
+
+#endif /* ASIC_REG_STLB_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
new file mode 100644
index 000000000000..0f5281d3e65b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_STLB_REGS_H_
+#define ASIC_REG_STLB_REGS_H_
+
+/*
+ *****************************************
+ * STLB (Prototype: STLB)
+ *****************************************
+ */
+
+#define mmSTLB_CACHE_INV 0x490010
+
+#define mmSTLB_CACHE_INV_BASE_39_8 0x490014
+
+#define mmSTLB_CACHE_INV_BASE_49_40 0x490018
+
+#define mmSTLB_STLB_FEATURE_EN 0x49001C
+
+#define mmSTLB_STLB_AXI_CACHE 0x490020
+
+#define mmSTLB_HOP_CONFIGURATION 0x490024
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0x490028
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0x49002C
+
+#define mmSTLB_LINK_LIST 0x490030
+
+#define mmSTLB_INV_ALL_START 0x490034
+
+#define mmSTLB_INV_ALL_SET 0x490038
+
+#define mmSTLB_INV_PS 0x49003C
+
+#define mmSTLB_INV_CONSUMER_INDEX 0x490040
+
+#define mmSTLB_INV_HIT_COUNT 0x490044
+
+#define mmSTLB_INV_SET 0x490048
+
+#define mmSTLB_SRAM_INIT 0x49004C
+
+#endif /* ASIC_REG_STLB_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
new file mode 100644
index 000000000000..e5587b49eecd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
@@ -0,0 +1,1607 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_MASKS_H_
+#define ASIC_REG_TPC0_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_SRF */
+#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0
+#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_CONFIG */
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00
+
+/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* TPC0_CFG_RESERVED_DESC_END */
+#define TPC0_CFG_RESERVED_DESC_END_V_SHIFT 0
+#define TPC0_CFG_RESERVED_DESC_END_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ROUND_CSR */
+#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0
+#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7
+
+/* TPC0_CFG_TBUF_BASE_ADDR_LOW */
+#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TBUF_BASE_ADDR_HIGH */
+#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SEMAPHORE */
+#define TPC0_CFG_SEMAPHORE_V_SHIFT 0
+#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_VFLAGS */
+#define TPC0_CFG_VFLAGS_V_SHIFT 0
+#define TPC0_CFG_VFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_SFLAGS */
+#define TPC0_CFG_SFLAGS_V_SHIFT 0
+#define TPC0_CFG_SFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_LFSR_POLYNOM */
+#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0
+#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_STATUS */
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4
+#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3
+#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8
+#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_SHIFT 4
+#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK 0x10
+
+/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_CFG_SUBTRACT_VALUE */
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SM_BASE_ADDRESS_LOW */
+#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TPC_CMD */
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40
+
+/* TPC0_CFG_TPC_EXECUTE */
+#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0
+#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1
+
+/* TPC0_CFG_TPC_STALL */
+#define TPC0_CFG_TPC_STALL_V_SHIFT 0
+#define TPC0_CFG_TPC_STALL_V_MASK 0x1
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_MSS_CONFIG */
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 0x400
+
+/* TPC0_CFG_TPC_INTR_CAUSE */
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TPC_INTR_MASK */
+#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0
+#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TSB_CONFIG */
+#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_SHIFT 0
+#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_MASK 0x1F
+#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_SHIFT 5
+#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_MASK 0x3E0
+#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_SHIFT 10
+#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_MASK 0xFFC00
+#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_SHIFT 20
+#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_MASK 0x3FF00000
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_0 */
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_0 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_1 */
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_1 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_2 */
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_2 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_3 */
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_3 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_4 */
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_4 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_SRF */
+#define TPC0_CFG_QM_SRF_V_SHIFT 0
+#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_CONFIG */
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00
+
+/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* TPC0_CFG_ARUSER */
+#define TPC0_CFG_ARUSER_ASID_SHIFT 0
+#define TPC0_CFG_ARUSER_ASID_MASK 0x3FF
+#define TPC0_CFG_ARUSER_MMBP_SHIFT 10
+#define TPC0_CFG_ARUSER_MMBP_MASK 0x400
+#define TPC0_CFG_ARUSER_V_SHIFT 11
+#define TPC0_CFG_ARUSER_V_MASK 0xFFFFF800
+
+/* TPC0_CFG_AWUSER */
+#define TPC0_CFG_AWUSER_ASID_SHIFT 0
+#define TPC0_CFG_AWUSER_ASID_MASK 0x3FF
+#define TPC0_CFG_AWUSER_MMBP_SHIFT 10
+#define TPC0_CFG_AWUSER_MMBP_MASK 0x400
+#define TPC0_CFG_AWUSER_V_SHIFT 11
+#define TPC0_CFG_AWUSER_V_MASK 0xFFFFF800
+
+/* TPC0_CFG_FUNC_MBIST_CNTRL */
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000
+
+/* TPC0_CFG_FUNC_MBIST_PAT */
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00
+
+/* TPC0_CFG_FUNC_MBIST_MEM */
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000
+
+#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
+
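The *_SHIFT/*_MASK pairs generated above follow the usual shift-then-mask convention: a field value is shifted into position and then ANDed with its absolute mask, and decoding is the inverse. A minimal sketch in C, assuming the tpc0_cfg_masks.h header above is included; the helper names and the field values passed in are illustrative placeholders, not the driver's own code:

#include <linux/types.h>
#include "tpc0_cfg_masks.h"

/* Pack the three TENSOR_CONFIG fields into a single register value. */
static u32 tpc0_tensor_cfg_pack(u32 data_type, u32 valid_dims, u32 last_dim)
{
	u32 cfg = 0;

	cfg |= (data_type << TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT) &
			TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK;
	cfg |= (valid_dims << TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT) &
			TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK;
	cfg |= (last_dim << TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT) &
			TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK;

	return cfg;
}

/* Decode one field back out of a previously read register value. */
static u32 tpc0_tensor_cfg_last_dim(u32 cfg)
{
	return (cfg & TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK) >>
			TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT;
}
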
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
new file mode 100644
index 000000000000..2be28a63c50a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_REGS_H_
+#define ASIC_REG_TPC0_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE06418
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE0641C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE06420
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE06424
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06428
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE0642C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE06430
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06434
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE06438
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE0643C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06440
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06444
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE06448
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE0644C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE06450
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06454
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06458
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE0645C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE06460
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE06464
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06468
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE0646C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE06470
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06474
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE06478
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE0647C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06480
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06484
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE06488
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE0648C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE06490
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE06494
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06498
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE0649C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE064A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE064A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE064A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE064AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE064B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE064B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE064B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE064BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE064C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE064C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE064C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE064CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE064D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE064D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE064E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE064FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE06500
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE06504
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE06508
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE0650C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE06510
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE06514
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE06518
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE0651C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE06520
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE06524
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE06528
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE0652C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE06530
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE06534
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE06538
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE0653C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE06540
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE06544
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE06548
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE0654C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE06550
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE06554
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06558
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE0655C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE06560
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06564
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE06568
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE0656C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06570
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06574
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE06578
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE0657C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE06580
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06584
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06588
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE0658C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE06590
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE06594
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06598
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE0659C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE065A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE065A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE065A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE065AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE065B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE065B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE065B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE065BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE065C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE065C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE065C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE065CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE065D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE065D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE065D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE065DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE065E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE065E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE065E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE065EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE065F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE065F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE065F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE065FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE06600
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE06604
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06608
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE0660C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE06610
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06614
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE06618
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE0661C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06620
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06624
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE06628
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE0662C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE06630
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE06634
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE06638
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE0663C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE06640
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE06644
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE06648
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE0664C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE06650
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE06654
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE06658
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE0665C
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06660
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE06664
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06668
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE0666C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06670
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE06674
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE06678
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE0667C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE06680
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE06684
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE06688
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE0668C
+
+#define mmTPC0_CFG_KERNEL_SRF_0 0xE06690
+
+#define mmTPC0_CFG_KERNEL_SRF_1 0xE06694
+
+#define mmTPC0_CFG_KERNEL_SRF_2 0xE06698
+
+#define mmTPC0_CFG_KERNEL_SRF_3 0xE0669C
+
+#define mmTPC0_CFG_KERNEL_SRF_4 0xE066A0
+
+#define mmTPC0_CFG_KERNEL_SRF_5 0xE066A4
+
+#define mmTPC0_CFG_KERNEL_SRF_6 0xE066A8
+
+#define mmTPC0_CFG_KERNEL_SRF_7 0xE066AC
+
+#define mmTPC0_CFG_KERNEL_SRF_8 0xE066B0
+
+#define mmTPC0_CFG_KERNEL_SRF_9 0xE066B4
+
+#define mmTPC0_CFG_KERNEL_SRF_10 0xE066B8
+
+#define mmTPC0_CFG_KERNEL_SRF_11 0xE066BC
+
+#define mmTPC0_CFG_KERNEL_SRF_12 0xE066C0
+
+#define mmTPC0_CFG_KERNEL_SRF_13 0xE066C4
+
+#define mmTPC0_CFG_KERNEL_SRF_14 0xE066C8
+
+#define mmTPC0_CFG_KERNEL_SRF_15 0xE066CC
+
+#define mmTPC0_CFG_KERNEL_SRF_16 0xE066D0
+
+#define mmTPC0_CFG_KERNEL_SRF_17 0xE066D4
+
+#define mmTPC0_CFG_KERNEL_SRF_18 0xE066D8
+
+#define mmTPC0_CFG_KERNEL_SRF_19 0xE066DC
+
+#define mmTPC0_CFG_KERNEL_SRF_20 0xE066E0
+
+#define mmTPC0_CFG_KERNEL_SRF_21 0xE066E4
+
+#define mmTPC0_CFG_KERNEL_SRF_22 0xE066E8
+
+#define mmTPC0_CFG_KERNEL_SRF_23 0xE066EC
+
+#define mmTPC0_CFG_KERNEL_SRF_24 0xE066F0
+
+#define mmTPC0_CFG_KERNEL_SRF_25 0xE066F4
+
+#define mmTPC0_CFG_KERNEL_SRF_26 0xE066F8
+
+#define mmTPC0_CFG_KERNEL_SRF_27 0xE066FC
+
+#define mmTPC0_CFG_KERNEL_SRF_28 0xE06700
+
+#define mmTPC0_CFG_KERNEL_SRF_29 0xE06704
+
+#define mmTPC0_CFG_KERNEL_SRF_30 0xE06708
+
+#define mmTPC0_CFG_KERNEL_SRF_31 0xE0670C
+
+#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE06710
+
+#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06714
+
+#define mmTPC0_CFG_RESERVED_DESC_END 0xE06738
+
+#define mmTPC0_CFG_ROUND_CSR 0xE067FC
+
+#define mmTPC0_CFG_TBUF_BASE_ADDR_LOW 0xE06800
+
+#define mmTPC0_CFG_TBUF_BASE_ADDR_HIGH 0xE06804
+
+#define mmTPC0_CFG_SEMAPHORE 0xE06808
+
+#define mmTPC0_CFG_VFLAGS 0xE0680C
+
+#define mmTPC0_CFG_SFLAGS 0xE06810
+
+#define mmTPC0_CFG_LFSR_POLYNOM 0xE06818
+
+#define mmTPC0_CFG_STATUS 0xE0681C
+
+#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06820
+
+#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06824
+
+#define mmTPC0_CFG_SM_BASE_ADDRESS_LOW 0xE06828
+
+#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0682C
+
+#define mmTPC0_CFG_TPC_CMD 0xE06830
+
+#define mmTPC0_CFG_TPC_EXECUTE 0xE06838
+
+#define mmTPC0_CFG_TPC_STALL 0xE0683C
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06840
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06844
+
+#define mmTPC0_CFG_MSS_CONFIG 0xE06854
+
+#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06858
+
+#define mmTPC0_CFG_TPC_INTR_MASK 0xE0685C
+
+#define mmTPC0_CFG_TSB_CONFIG 0xE06860
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04
+
+#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08
+
+#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE06A18
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A1C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A20
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE06A24
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A28
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A2C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE06A30
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A34
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A38
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE06A3C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A40
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A44
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE06A48
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A4C
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A50
+
+#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A54
+
+#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A58
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A5C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A60
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE06A64
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A68
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A6C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE06A70
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A74
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A78
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE06A7C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A80
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A84
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE06A88
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A8C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A90
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE06A94
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A98
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A9C
+
+#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06AA0
+
+#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06AA4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06AA8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06AAC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE06AB0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06AB4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06AB8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE06ABC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06AC0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06AC4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE06AC8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06ACC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06AD0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE06AD4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AD8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE06ADC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE06AE0
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AE4
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AE8
+
+#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AEC
+
+#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AF0
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AF4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06AF8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE06AFC
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06B00
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06B04
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE06B08
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06B0C
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06B10
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE06B14
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06B18
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06B1C
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE06B20
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06B24
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06B28
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE06B2C
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06B30
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06B34
+
+#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06B38
+
+#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06B3C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06B40
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06B44
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE06B48
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06B4C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06B50
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE06B54
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B58
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B5C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE06B60
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B64
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B68
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE06B6C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B70
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B74
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE06B78
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B7C
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B80
+
+#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B84
+
+#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B88
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B8C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B90
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE06B94
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B98
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B9C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE06BA0
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06BA4
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06BA8
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE06BAC
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06BB0
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06BB4
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE06BB8
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06BBC
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06BC0
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE06BC4
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06BC8
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06BCC
+
+#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06BD0
+
+#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06BD4
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06BD8
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06BDC
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE06BE0
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06BE4
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06BE8
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE06BEC
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06BF0
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06BF4
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE06BF8
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06BFC
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06C00
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE06C04
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06C08
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06C0C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE06C10
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06C14
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06C18
+
+#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06C1C
+
+#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06C20
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06C24
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06C28
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE06C2C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06C30
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06C34
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE06C38
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06C3C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06C40
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE06C44
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06C48
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06C4C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE06C50
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06C54
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06C58
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE06C5C
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06C60
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06C64
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06C68
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06C6C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06C70
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06C74
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06C78
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06C7C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06C80
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06C84
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06C88
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06C8C
+
+#define mmTPC0_CFG_QM_SRF_0 0xE06C90
+
+#define mmTPC0_CFG_QM_SRF_1 0xE06C94
+
+#define mmTPC0_CFG_QM_SRF_2 0xE06C98
+
+#define mmTPC0_CFG_QM_SRF_3 0xE06C9C
+
+#define mmTPC0_CFG_QM_SRF_4 0xE06CA0
+
+#define mmTPC0_CFG_QM_SRF_5 0xE06CA4
+
+#define mmTPC0_CFG_QM_SRF_6 0xE06CA8
+
+#define mmTPC0_CFG_QM_SRF_7 0xE06CAC
+
+#define mmTPC0_CFG_QM_SRF_8 0xE06CB0
+
+#define mmTPC0_CFG_QM_SRF_9 0xE06CB4
+
+#define mmTPC0_CFG_QM_SRF_10 0xE06CB8
+
+#define mmTPC0_CFG_QM_SRF_11 0xE06CBC
+
+#define mmTPC0_CFG_QM_SRF_12 0xE06CC0
+
+#define mmTPC0_CFG_QM_SRF_13 0xE06CC4
+
+#define mmTPC0_CFG_QM_SRF_14 0xE06CC8
+
+#define mmTPC0_CFG_QM_SRF_15 0xE06CCC
+
+#define mmTPC0_CFG_QM_SRF_16 0xE06CD0
+
+#define mmTPC0_CFG_QM_SRF_17 0xE06CD4
+
+#define mmTPC0_CFG_QM_SRF_18 0xE06CD8
+
+#define mmTPC0_CFG_QM_SRF_19 0xE06CDC
+
+#define mmTPC0_CFG_QM_SRF_20 0xE06CE0
+
+#define mmTPC0_CFG_QM_SRF_21 0xE06CE4
+
+#define mmTPC0_CFG_QM_SRF_22 0xE06CE8
+
+#define mmTPC0_CFG_QM_SRF_23 0xE06CEC
+
+#define mmTPC0_CFG_QM_SRF_24 0xE06CF0
+
+#define mmTPC0_CFG_QM_SRF_25 0xE06CF4
+
+#define mmTPC0_CFG_QM_SRF_26 0xE06CF8
+
+#define mmTPC0_CFG_QM_SRF_27 0xE06CFC
+
+#define mmTPC0_CFG_QM_SRF_28 0xE06D00
+
+#define mmTPC0_CFG_QM_SRF_29 0xE06D04
+
+#define mmTPC0_CFG_QM_SRF_30 0xE06D08
+
+#define mmTPC0_CFG_QM_SRF_31 0xE06D0C
+
+#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06D10
+
+#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D14
+
+#define mmTPC0_CFG_ARUSER 0xE06D18
+
+#define mmTPC0_CFG_AWUSER 0xE06D1C
+
+#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE06E00
+
+#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE06E04
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE06E08
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE06E0C
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE06E10
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE06E14
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE06E18
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE06E1C
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE06E20
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE06E24
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE06E28
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE06E2C
+
+#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
+
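The mm* constants above give the locations of the TPC0_CFG registers within the device's register map. A minimal sketch of how such offsets are typically applied on top of an ioremap()ed base, assuming cfg_base maps the region these offsets are relative to; the function names and cfg_base parameter are assumptions for illustration, not the driver's own accessors (the habanalabs driver wraps its MMIO access in its own helpers):

#include <linux/io.h>
#include <linux/types.h>
#include "tpc0_cfg_regs.h"

/* Stall TPC0 by setting the single valid bit of TPC_STALL. */
static void tpc0_stall(void __iomem *cfg_base)
{
	writel(1, cfg_base + mmTPC0_CFG_TPC_STALL);
}

/* Read back the engine status register. */
static u32 tpc0_status(void __iomem *cfg_base)
{
	return readl(cfg_base + mmTPC0_CFG_STATUS);
}
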
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
new file mode 100644
index 000000000000..9aa2d8b53207
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CMDQ_MASKS_H_
+#define ASIC_REG_TPC0_CMDQ_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+/* TPC0_CMDQ_GLBL_CFG0 */
+#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0
+#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1
+#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1
+#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2
+#define TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2
+#define TPC0_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4
+#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3
+#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* TPC0_CMDQ_GLBL_CFG1 */
+#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2
+#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4
+#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* TPC0_CMDQ_GLBL_PROT */
+#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0
+#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1
+#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1
+#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2
+#define TPC0_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2
+#define TPC0_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4
+#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3
+#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8
+#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* TPC0_CMDQ_GLBL_ERR_CFG */
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* TPC0_CMDQ_GLBL_ERR_ADDR_LO */
+#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_ERR_ADDR_HI */
+#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_ERR_WDATA */
+#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_SECURE_PROPS */
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_CMDQ_GLBL_NON_SECURE_PROPS */
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_CMDQ_GLBL_STS0 */
+#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2
+#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4
+#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* TPC0_CMDQ_GLBL_STS1 */
+#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* TPC0_CMDQ_CQ_CFG0 */
+#define TPC0_CMDQ_CQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_CMDQ_CQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_CMDQ_CQ_CFG1 */
+#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_ARUSER */
+#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define TPC0_CMDQ_CQ_ARUSER_WORD_SHIFT 1
+#define TPC0_CMDQ_CQ_ARUSER_WORD_MASK 0x2
+
+/* TPC0_CMDQ_CQ_PTR_LO */
+#define TPC0_CMDQ_CQ_PTR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_PTR_HI */
+#define TPC0_CMDQ_CQ_PTR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_TSIZE */
+#define TPC0_CMDQ_CQ_TSIZE_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_CTL */
+#define TPC0_CMDQ_CQ_CTL_RPT_SHIFT 0
+#define TPC0_CMDQ_CQ_CTL_RPT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CTL_CTL_SHIFT 16
+#define TPC0_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_PTR_LO_STS */
+#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_PTR_HI_STS */
+#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_TSIZE_STS */
+#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_CTL_STS */
+#define TPC0_CMDQ_CQ_CTL_STS_RPT_SHIFT 0
+#define TPC0_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CTL_STS_CTL_SHIFT 16
+#define TPC0_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_STS0 */
+#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_STS1 */
+#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31
+#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_EN */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_SAT */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* TPC0_CMDQ_CQ_IFIFO_CNT */
+#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_FENCE0_RDATA */
+#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE1_RDATA */
+#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE2_RDATA */
+#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE3_RDATA */
+#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE0_CNT */
+#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE1_CNT */
+#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE2_CNT */
+#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE3_CNT */
+#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_STS */
+#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CP_STS_ERDY_SHIFT 16
+#define TPC0_CMDQ_CP_STS_ERDY_MASK 0x10000
+#define TPC0_CMDQ_CP_STS_RRDY_SHIFT 17
+#define TPC0_CMDQ_CP_STS_RRDY_MASK 0x20000
+#define TPC0_CMDQ_CP_STS_MRDY_SHIFT 18
+#define TPC0_CMDQ_CP_STS_MRDY_MASK 0x40000
+#define TPC0_CMDQ_CP_STS_SW_STOP_SHIFT 19
+#define TPC0_CMDQ_CP_STS_SW_STOP_MASK 0x80000
+#define TPC0_CMDQ_CP_STS_FENCE_ID_SHIFT 20
+#define TPC0_CMDQ_CP_STS_FENCE_ID_MASK 0x300000
+#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* TPC0_CMDQ_CP_CURRENT_INST_LO */
+#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_CURRENT_INST_HI */
+#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_BARRIER_CFG */
+#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* TPC0_CMDQ_CP_DBG_0 */
+#define TPC0_CMDQ_CP_DBG_0_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_DBG_0_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CQ_BUF_ADDR */
+#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_BUF_RDATA */
+#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
new file mode 100644
index 000000000000..3572752ba66e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CMDQ_REGS_H_
+#define ASIC_REG_TPC0_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC0_CMDQ_GLBL_CFG0 0xE09000
+
+#define mmTPC0_CMDQ_GLBL_CFG1 0xE09004
+
+#define mmTPC0_CMDQ_GLBL_PROT 0xE09008
+
+#define mmTPC0_CMDQ_GLBL_ERR_CFG 0xE0900C
+
+#define mmTPC0_CMDQ_GLBL_ERR_ADDR_LO 0xE09010
+
+#define mmTPC0_CMDQ_GLBL_ERR_ADDR_HI 0xE09014
+
+#define mmTPC0_CMDQ_GLBL_ERR_WDATA 0xE09018
+
+#define mmTPC0_CMDQ_GLBL_SECURE_PROPS 0xE0901C
+
+#define mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS 0xE09020
+
+#define mmTPC0_CMDQ_GLBL_STS0 0xE09024
+
+#define mmTPC0_CMDQ_GLBL_STS1 0xE09028
+
+#define mmTPC0_CMDQ_CQ_CFG0 0xE090B0
+
+#define mmTPC0_CMDQ_CQ_CFG1 0xE090B4
+
+#define mmTPC0_CMDQ_CQ_ARUSER 0xE090B8
+
+#define mmTPC0_CMDQ_CQ_PTR_LO 0xE090C0
+
+#define mmTPC0_CMDQ_CQ_PTR_HI 0xE090C4
+
+#define mmTPC0_CMDQ_CQ_TSIZE 0xE090C8
+
+#define mmTPC0_CMDQ_CQ_CTL 0xE090CC
+
+#define mmTPC0_CMDQ_CQ_PTR_LO_STS 0xE090D4
+
+#define mmTPC0_CMDQ_CQ_PTR_HI_STS 0xE090D8
+
+#define mmTPC0_CMDQ_CQ_TSIZE_STS 0xE090DC
+
+#define mmTPC0_CMDQ_CQ_CTL_STS 0xE090E0
+
+#define mmTPC0_CMDQ_CQ_STS0 0xE090E4
+
+#define mmTPC0_CMDQ_CQ_STS1 0xE090E8
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN 0xE090F0
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE090F4
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT 0xE090F8
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE090FC
+
+#define mmTPC0_CMDQ_CQ_IFIFO_CNT 0xE09108
+
+#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE09120
+
+#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE09124
+
+#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE09128
+
+#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE0912C
+
+#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE09130
+
+#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE09134
+
+#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE09138
+
+#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE0913C
+
+#define mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE09140
+
+#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE09144
+
+#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE09148
+
+#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE0914C
+
+#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE09150
+
+#define mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE09154
+
+#define mmTPC0_CMDQ_CP_FENCE0_RDATA 0xE09158
+
+#define mmTPC0_CMDQ_CP_FENCE1_RDATA 0xE0915C
+
+#define mmTPC0_CMDQ_CP_FENCE2_RDATA 0xE09160
+
+#define mmTPC0_CMDQ_CP_FENCE3_RDATA 0xE09164
+
+#define mmTPC0_CMDQ_CP_FENCE0_CNT 0xE09168
+
+#define mmTPC0_CMDQ_CP_FENCE1_CNT 0xE0916C
+
+#define mmTPC0_CMDQ_CP_FENCE2_CNT 0xE09170
+
+#define mmTPC0_CMDQ_CP_FENCE3_CNT 0xE09174
+
+#define mmTPC0_CMDQ_CP_STS 0xE09178
+
+#define mmTPC0_CMDQ_CP_CURRENT_INST_LO 0xE0917C
+
+#define mmTPC0_CMDQ_CP_CURRENT_INST_HI 0xE09180
+
+#define mmTPC0_CMDQ_CP_BARRIER_CFG 0xE09184
+
+#define mmTPC0_CMDQ_CP_DBG_0 0xE09188
+
+#define mmTPC0_CMDQ_CQ_BUF_ADDR 0xE09308
+
+#define mmTPC0_CMDQ_CQ_BUF_RDATA 0xE0930C
+
+#endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
new file mode 100644
index 000000000000..ed866d93c440
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_EML_CFG_MASKS_H_
+#define ASIC_REG_TPC0_EML_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
+ *****************************************
+ */
+
+/* TPC0_EML_CFG_DBG_CNT */
+#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_SHIFT 0
+#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_MASK 0x1
+#define TPC0_EML_CFG_DBG_CNT_DBG_EN_SHIFT 1
+#define TPC0_EML_CFG_DBG_CNT_DBG_EN_MASK 0x2
+#define TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT 2
+#define TPC0_EML_CFG_DBG_CNT_CORE_RST_MASK 0x4
+#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_SHIFT 4
+#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_MASK 0x10
+#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_SHIFT 5
+#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_MASK 0x20
+#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_SHIFT 6
+#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK 0x40
+#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_SHIFT 7
+#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_MASK 0x80
+#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_SHIFT 16
+#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_MASK 0x10000
+
+/* TPC0_EML_CFG_DBG_STS */
+#define TPC0_EML_CFG_DBG_STS_DBG_MODE_SHIFT 0
+#define TPC0_EML_CFG_DBG_STS_DBG_MODE_MASK 0x1
+#define TPC0_EML_CFG_DBG_STS_CORE_READY_SHIFT 1
+#define TPC0_EML_CFG_DBG_STS_CORE_READY_MASK 0x2
+#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_SHIFT 2
+#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_MASK 0x4
+#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_SHIFT 3
+#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_MASK 0x8
+#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_SHIFT 4
+#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_MASK 0x10
+#define TPC0_EML_CFG_DBG_STS_QM_IDLE_SHIFT 5
+#define TPC0_EML_CFG_DBG_STS_QM_IDLE_MASK 0x20
+#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_SHIFT 6
+#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_MASK 0x40
+#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_SHIFT 7
+#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_MASK 0x80
+#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_SHIFT 8
+#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_MASK 0xFFFFFF00
+
+/* TPC0_EML_CFG_DBG_PADD */
+#define TPC0_EML_CFG_DBG_PADD_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_PADD_COUNT */
+#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_PADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_PADD_EN */
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_SHIFT 2
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_MASK 0x4
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_SHIFT 3
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_MASK 0x8
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_SHIFT 4
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_MASK 0x10
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_SHIFT 5
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_MASK 0x20
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_SHIFT 6
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_MASK 0x40
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_SHIFT 7
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_MASK 0x80
+
+/* TPC0_EML_CFG_DBG_VPADD_HIGH */
+#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_MASK 0x1FF
+
+/* TPC0_EML_CFG_DBG_VPADD_LOW */
+#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_MASK 0x1FF
+
+/* TPC0_EML_CFG_DBG_VPADD_COUNT */
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_VPADD_EN */
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_SPADD_HIGH */
+#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_LOW */
+#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_COUNT */
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_EN */
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_COUNT */
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_EN */
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_EN */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT */
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_EN */
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_SPDATA */
+#define TPC0_EML_CFG_DBG_SPDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_COUNT */
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_EN */
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA_EN */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_MASK 0x2
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA_COUNT */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA_EN */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_MASK 0x2
+
+/* TPC0_EML_CFG_DBG_D0_PC */
+#define TPC0_EML_CFG_DBG_D0_PC_PC_SHIFT 0
+#define TPC0_EML_CFG_DBG_D0_PC_PC_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_RTTCONFIG */
+#define TPC0_EML_CFG_RTTCONFIG_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTCONFIG_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTCONFIG_PRIO_SHIFT 1
+#define TPC0_EML_CFG_RTTCONFIG_PRIO_MASK 0x2
+
+/* TPC0_EML_CFG_RTTPREDICATE */
+#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTPREDICATE_GEN_SHIFT 1
+#define TPC0_EML_CFG_RTTPREDICATE_GEN_MASK 0x2
+#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_SHIFT 2
+#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_MASK 0x4
+#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_SHIFT 16
+#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_MASK 0xFFFF0000
+
+/* TPC0_EML_CFG_RTTPREDICATE_INTV */
+#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_SHIFT 0
+#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_RTTTS */
+#define TPC0_EML_CFG_RTTTS_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTTS_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTTS_GEN_SHIFT 1
+#define TPC0_EML_CFG_RTTTS_GEN_MASK 0x2
+#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_SHIFT 2
+#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_MASK 0x4
+
+/* TPC0_EML_CFG_RTTTS_INTV */
+#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_SHIFT 0
+#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_INST_INSERT */
+#define TPC0_EML_CFG_DBG_INST_INSERT_INST_SHIFT 0
+#define TPC0_EML_CFG_DBG_INST_INSERT_INST_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_INST_INSERT_CTL */
+#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_SHIFT 0
+#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK 0x1
+
+#endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
new file mode 100644
index 000000000000..f1a1b4fa4841
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_EML_CFG_REGS_H_
+#define ASIC_REG_TPC0_EML_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
+ *****************************************
+ */
+
+#define mmTPC0_EML_CFG_DBG_CNT 0x3040000
+
+#define mmTPC0_EML_CFG_DBG_STS 0x3040004
+
+#define mmTPC0_EML_CFG_DBG_PADD_0 0x3040008
+
+#define mmTPC0_EML_CFG_DBG_PADD_1 0x304000C
+
+#define mmTPC0_EML_CFG_DBG_PADD_2 0x3040010
+
+#define mmTPC0_EML_CFG_DBG_PADD_3 0x3040014
+
+#define mmTPC0_EML_CFG_DBG_PADD_4 0x3040018
+
+#define mmTPC0_EML_CFG_DBG_PADD_5 0x304001C
+
+#define mmTPC0_EML_CFG_DBG_PADD_6 0x3040020
+
+#define mmTPC0_EML_CFG_DBG_PADD_7 0x3040024
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_0 0x3040028
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_1 0x304002C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_2 0x3040030
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_3 0x3040034
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_4 0x3040038
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_5 0x304003C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_6 0x3040040
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_7 0x3040044
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_0 0x3040048
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_1 0x304004C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_2 0x3040050
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_3 0x3040054
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_4 0x3040058
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_5 0x304005C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_6 0x3040060
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_7 0x3040064
+
+#define mmTPC0_EML_CFG_DBG_PADD_EN 0x3040068
+
+#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_0 0x304006C
+
+#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_1 0x3040070
+
+#define mmTPC0_EML_CFG_DBG_VPADD_LOW_0 0x3040074
+
+#define mmTPC0_EML_CFG_DBG_VPADD_LOW_1 0x3040078
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_0 0x304007C
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_1 0x3040080
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_0 0x3040084
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_1 0x3040088
+
+#define mmTPC0_EML_CFG_DBG_VPADD_EN 0x304008C
+
+#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_0 0x3040090
+
+#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_1 0x3040094
+
+#define mmTPC0_EML_CFG_DBG_SPADD_LOW_0 0x3040098
+
+#define mmTPC0_EML_CFG_DBG_SPADD_LOW_1 0x304009C
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_0 0x30400A0
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_1 0x30400A4
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_0 0x30400A8
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_1 0x30400AC
+
+#define mmTPC0_EML_CFG_DBG_SPADD_EN 0x30400B0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_0 0x30400B4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_1 0x30400B8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_0 0x30400BC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_1 0x30400C0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_0 0x30400C4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_1 0x30400C8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_0 0x30400CC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_1 0x30400D0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_0 0x30400D4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_1 0x30400D8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_0 0x30400DC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_1 0x30400E0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_EN 0x30400E4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_0 0x30400E8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_1 0x30400EC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_0 0x30400F0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_1 0x30400F4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_0 0x30400F8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_1 0x30400FC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_0 0x3040100
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_1 0x3040104
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_0 0x3040108
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_1 0x304010C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_0 0x3040110
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_1 0x3040114
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_EN 0x3040118
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_0 0x304011C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_1 0x3040120
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_0 0x3040124
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_1 0x3040128
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_0 0x304012C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_1 0x3040130
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_0 0x3040134
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_1 0x3040138
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_0 0x304013C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_1 0x3040140
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_0 0x3040144
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_1 0x3040148
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_EN 0x304014C
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_0 0x3040150
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_1 0x3040154
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_0 0x3040158
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_1 0x304015C
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_0 0x3040160
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_1 0x3040164
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_EN 0x3040168
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_0 0x304016C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_1 0x3040170
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_2 0x3040174
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_3 0x3040178
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_4 0x304017C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_5 0x3040180
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_6 0x3040184
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_7 0x3040188
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_8 0x304018C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_9 0x3040190
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_10 0x3040194
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_11 0x3040198
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_12 0x304019C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_13 0x30401A0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_14 0x30401A4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_15 0x30401A8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_16 0x30401AC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_17 0x30401B0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_18 0x30401B4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_19 0x30401B8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_20 0x30401BC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_21 0x30401C0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_22 0x30401C4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_23 0x30401C8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_24 0x30401CC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_25 0x30401D0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_26 0x30401D4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_27 0x30401D8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_28 0x30401DC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_29 0x30401E0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_30 0x30401E4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_31 0x30401E8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_COUNT 0x30401EC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH 0x30401F0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_EN 0x30401F4
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA 0x30401F8
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA_COUNT 0x30401FC
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH 0x3040200
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA_EN 0x3040204
+
+#define mmTPC0_EML_CFG_DBG_D0_PC 0x3040208
+
+#define mmTPC0_EML_CFG_RTTCONFIG 0x3040300
+
+#define mmTPC0_EML_CFG_RTTPREDICATE 0x3040304
+
+#define mmTPC0_EML_CFG_RTTPREDICATE_INTV 0x3040308
+
+#define mmTPC0_EML_CFG_RTTTS 0x304030C
+
+#define mmTPC0_EML_CFG_RTTTS_INTV 0x3040310
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_0 0x3040314
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_1 0x3040318
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_2 0x304031C
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_3 0x3040320
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_4 0x3040324
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_5 0x3040328
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_6 0x304032C
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_7 0x3040330
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL 0x3040334
+
+#endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
new file mode 100644
index 000000000000..7f86621179a5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_NRTR_MASKS_H_
+#define ASIC_REG_TPC0_NRTR_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+/* TPC0_NRTR_HBW_MAX_CRED */
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* TPC0_NRTR_LBW_MAX_CRED */
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* TPC0_NRTR_DBG_E_ARB */
+#define TPC0_NRTR_DBG_E_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_E_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_E_ARB_S_SHIFT 8
+#define TPC0_NRTR_DBG_E_ARB_S_MASK 0x700
+#define TPC0_NRTR_DBG_E_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_E_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_E_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_W_ARB */
+#define TPC0_NRTR_DBG_W_ARB_E_SHIFT 0
+#define TPC0_NRTR_DBG_W_ARB_E_MASK 0x7
+#define TPC0_NRTR_DBG_W_ARB_S_SHIFT 8
+#define TPC0_NRTR_DBG_W_ARB_S_MASK 0x700
+#define TPC0_NRTR_DBG_W_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_W_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_W_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_N_ARB */
+#define TPC0_NRTR_DBG_N_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_N_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_N_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_N_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_N_ARB_S_SHIFT 16
+#define TPC0_NRTR_DBG_N_ARB_S_MASK 0x70000
+#define TPC0_NRTR_DBG_N_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_S_ARB */
+#define TPC0_NRTR_DBG_S_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_S_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_S_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_S_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_S_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_S_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_S_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_L_ARB */
+#define TPC0_NRTR_DBG_L_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_L_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_L_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_L_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_L_ARB_S_SHIFT 16
+#define TPC0_NRTR_DBG_L_ARB_S_MASK 0x70000
+#define TPC0_NRTR_DBG_L_ARB_N_SHIFT 24
+#define TPC0_NRTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_E_ARB_MAX */
+#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_W_ARB_MAX */
+#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_N_ARB_MAX */
+#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_S_ARB_MAX */
+#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_L_ARB_MAX */
+#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_SPLIT_COEF */
+#define TPC0_NRTR_SPLIT_COEF_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_CFG */
+#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
+#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
+#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
+#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
+#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* TPC0_NRTR_SPLIT_RD_SAT */
+#define TPC0_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_RD_RST_TOKEN */
+#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_RD_TIMEOUT */
+#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_SPLIT_WR_SAT */
+#define TPC0_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_WPLIT_WR_TST_TOLEN */
+#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_WR_TIMEOUT */
+#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_HIT */
+#define TPC0_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* TPC0_NRTR_HBW_RANGE_MASK_L */
+#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_MASK_H */
+#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* TPC0_NRTR_HBW_RANGE_BASE_L */
+#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_BASE_H */
+#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* TPC0_NRTR_LBW_RANGE_HIT */
+#define TPC0_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* TPC0_NRTR_LBW_RANGE_MASK */
+#define TPC0_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* TPC0_NRTR_LBW_RANGE_BASE */
+#define TPC0_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* TPC0_NRTR_RGLTR */
+#define TPC0_NRTR_RGLTR_WR_EN_SHIFT 0
+#define TPC0_NRTR_RGLTR_WR_EN_MASK 0x1
+#define TPC0_NRTR_RGLTR_RD_EN_SHIFT 4
+#define TPC0_NRTR_RGLTR_RD_EN_MASK 0x10
+
+/* TPC0_NRTR_RGLTR_WR_RESULT */
+#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* TPC0_NRTR_RGLTR_RD_RESULT */
+#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* TPC0_NRTR_SCRAMB_EN */
+#define TPC0_NRTR_SCRAMB_EN_VAL_SHIFT 0
+#define TPC0_NRTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* TPC0_NRTR_NON_LIN_SCRAMB */
+#define TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
new file mode 100644
index 000000000000..dc280f4e6608
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_NRTR_REGS_H_
+#define ASIC_REG_TPC0_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmTPC0_NRTR_HBW_MAX_CRED 0xE00100
+
+#define mmTPC0_NRTR_LBW_MAX_CRED 0xE00120
+
+#define mmTPC0_NRTR_DBG_E_ARB 0xE00300
+
+#define mmTPC0_NRTR_DBG_W_ARB 0xE00304
+
+#define mmTPC0_NRTR_DBG_N_ARB 0xE00308
+
+#define mmTPC0_NRTR_DBG_S_ARB 0xE0030C
+
+#define mmTPC0_NRTR_DBG_L_ARB 0xE00310
+
+#define mmTPC0_NRTR_DBG_E_ARB_MAX 0xE00320
+
+#define mmTPC0_NRTR_DBG_W_ARB_MAX 0xE00324
+
+#define mmTPC0_NRTR_DBG_N_ARB_MAX 0xE00328
+
+#define mmTPC0_NRTR_DBG_S_ARB_MAX 0xE0032C
+
+#define mmTPC0_NRTR_DBG_L_ARB_MAX 0xE00330
+
+#define mmTPC0_NRTR_SPLIT_COEF_0 0xE00400
+
+#define mmTPC0_NRTR_SPLIT_COEF_1 0xE00404
+
+#define mmTPC0_NRTR_SPLIT_COEF_2 0xE00408
+
+#define mmTPC0_NRTR_SPLIT_COEF_3 0xE0040C
+
+#define mmTPC0_NRTR_SPLIT_COEF_4 0xE00410
+
+#define mmTPC0_NRTR_SPLIT_COEF_5 0xE00414
+
+#define mmTPC0_NRTR_SPLIT_COEF_6 0xE00418
+
+#define mmTPC0_NRTR_SPLIT_COEF_7 0xE0041C
+
+#define mmTPC0_NRTR_SPLIT_COEF_8 0xE00420
+
+#define mmTPC0_NRTR_SPLIT_COEF_9 0xE00424
+
+#define mmTPC0_NRTR_SPLIT_CFG 0xE00440
+
+#define mmTPC0_NRTR_SPLIT_RD_SAT 0xE00444
+
+#define mmTPC0_NRTR_SPLIT_RD_RST_TOKEN 0xE00448
+
+#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_0 0xE0044C
+
+#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_1 0xE00450
+
+#define mmTPC0_NRTR_SPLIT_WR_SAT 0xE00454
+
+#define mmTPC0_NRTR_WPLIT_WR_TST_TOLEN 0xE00458
+
+#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_0 0xE0045C
+
+#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_1 0xE00460
+
+#define mmTPC0_NRTR_HBW_RANGE_HIT 0xE00470
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_0 0xE00480
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_1 0xE00484
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_2 0xE00488
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_3 0xE0048C
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_4 0xE00490
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_5 0xE00494
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_6 0xE00498
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_7 0xE0049C
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_0 0xE004A0
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_1 0xE004A4
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_2 0xE004A8
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_3 0xE004AC
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_4 0xE004B0
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_5 0xE004B4
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_6 0xE004B8
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_7 0xE004BC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_0 0xE004C0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_1 0xE004C4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_2 0xE004C8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_3 0xE004CC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_4 0xE004D0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_5 0xE004D4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_6 0xE004D8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_7 0xE004DC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_0 0xE004E0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_1 0xE004E4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_2 0xE004E8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_3 0xE004EC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_4 0xE004F0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_5 0xE004F4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_6 0xE004F8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_7 0xE004FC
+
+#define mmTPC0_NRTR_LBW_RANGE_HIT 0xE00500
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_0 0xE00510
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_1 0xE00514
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_2 0xE00518
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_3 0xE0051C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_4 0xE00520
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_5 0xE00524
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_6 0xE00528
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_7 0xE0052C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_8 0xE00530
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_9 0xE00534
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_10 0xE00538
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_11 0xE0053C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_12 0xE00540
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_13 0xE00544
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_14 0xE00548
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_15 0xE0054C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_0 0xE00550
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_1 0xE00554
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_2 0xE00558
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_3 0xE0055C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_4 0xE00560
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_5 0xE00564
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_6 0xE00568
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_7 0xE0056C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_8 0xE00570
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_9 0xE00574
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_10 0xE00578
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_11 0xE0057C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_12 0xE00580
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_13 0xE00584
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_14 0xE00588
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_15 0xE0058C
+
+#define mmTPC0_NRTR_RGLTR 0xE00590
+
+#define mmTPC0_NRTR_RGLTR_WR_RESULT 0xE00594
+
+#define mmTPC0_NRTR_RGLTR_RD_RESULT 0xE00598
+
+#define mmTPC0_NRTR_SCRAMB_EN 0xE00600
+
+#define mmTPC0_NRTR_NON_LIN_SCRAMB 0xE00604
+
+#endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
new file mode 100644
index 000000000000..80d97ee3d8d6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_MASKS_H_
+#define ASIC_REG_TPC0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* TPC0_QM_GLBL_CFG0 */
+#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0x1
+#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 1
+#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x2
+#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 2
+#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x4
+#define TPC0_QM_GLBL_CFG0_DMA_EN_SHIFT 3
+#define TPC0_QM_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* TPC0_QM_GLBL_CFG1 */
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 2
+#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x4
+#define TPC0_QM_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define TPC0_QM_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* TPC0_QM_GLBL_PROT */
+#define TPC0_QM_GLBL_PROT_PQF_PROT_SHIFT 0
+#define TPC0_QM_GLBL_PROT_PQF_PROT_MASK 0x1
+#define TPC0_QM_GLBL_PROT_CQF_PROT_SHIFT 1
+#define TPC0_QM_GLBL_PROT_CQF_PROT_MASK 0x2
+#define TPC0_QM_GLBL_PROT_CP_PROT_SHIFT 2
+#define TPC0_QM_GLBL_PROT_CP_PROT_MASK 0x4
+#define TPC0_QM_GLBL_PROT_DMA_PROT_SHIFT 3
+#define TPC0_QM_GLBL_PROT_DMA_PROT_MASK 0x8
+#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* TPC0_QM_GLBL_ERR_CFG */
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* TPC0_QM_GLBL_ERR_ADDR_LO */
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_ADDR_HI */
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_WDATA */
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_SECURE_PROPS */
+#define TPC0_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_NON_SECURE_PROPS */
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_STS0 */
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 2
+#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x4
+#define TPC0_QM_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define TPC0_QM_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* TPC0_QM_GLBL_STS1 */
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* TPC0_QM_PQ_BASE_LO */
+#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_BASE_HI */
+#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_SIZE */
+#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0
+#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PI */
+#define TPC0_QM_PQ_PI_VAL_SHIFT 0
+#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CI */
+#define TPC0_QM_PQ_CI_VAL_SHIFT 0
+#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CFG0 */
+#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_PQ_CFG1 */
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_ARUSER */
+#define TPC0_QM_PQ_ARUSER_NOSNOOP_SHIFT 0
+#define TPC0_QM_PQ_ARUSER_NOSNOOP_MASK 0x1
+#define TPC0_QM_PQ_ARUSER_WORD_SHIFT 1
+#define TPC0_QM_PQ_ARUSER_WORD_MASK 0x2
+
+/* TPC0_QM_PQ_PUSH0 */
+#define TPC0_QM_PQ_PUSH0_PTR_LO_SHIFT 0
+#define TPC0_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PUSH1 */
+#define TPC0_QM_PQ_PUSH1_PTR_HI_SHIFT 0
+#define TPC0_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PUSH2 */
+#define TPC0_QM_PQ_PUSH2_TSIZE_SHIFT 0
+#define TPC0_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PUSH3 */
+#define TPC0_QM_PQ_PUSH3_RPT_SHIFT 0
+#define TPC0_QM_PQ_PUSH3_RPT_MASK 0xFFFF
+#define TPC0_QM_PQ_PUSH3_CTL_SHIFT 16
+#define TPC0_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_STS0 */
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_STS1 */
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_PQ_RD_RATE_LIM_EN */
+#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN */
+#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_QM_PQ_RD_RATE_LIM_SAT */
+#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_QM_PQ_RD_RATE_LIM_TOUT */
+#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* TPC0_QM_CQ_CFG0 */
+#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_CQ_CFG1 */
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_ARUSER */
+#define TPC0_QM_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define TPC0_QM_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define TPC0_QM_CQ_ARUSER_WORD_SHIFT 1
+#define TPC0_QM_CQ_ARUSER_WORD_MASK 0x2
+
+/* TPC0_QM_CQ_PTR_LO */
+#define TPC0_QM_CQ_PTR_LO_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI */
+#define TPC0_QM_CQ_PTR_HI_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE */
+#define TPC0_QM_CQ_TSIZE_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL */
+#define TPC0_QM_CQ_CTL_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_STS */
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_STS */
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_STS */
+#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_STS */
+#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_STS0 */
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_STS1 */
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_CQ_RD_RATE_LIM_EN */
+#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN */
+#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_QM_CQ_RD_RATE_LIM_SAT */
+#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_QM_CQ_RD_RATE_LIM_TOUT */
+#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* TPC0_QM_CQ_IFIFO_CNT */
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET */
+#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_COMMIT_OFFSET */
+#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_FENCE0_RDATA */
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE1_RDATA */
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE2_RDATA */
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE3_RDATA */
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE0_CNT */
+#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_FENCE1_CNT */
+#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_FENCE2_CNT */
+#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_FENCE3_CNT */
+#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_STS */
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CP_STS_ERDY_SHIFT 16
+#define TPC0_QM_CP_STS_ERDY_MASK 0x10000
+#define TPC0_QM_CP_STS_RRDY_SHIFT 17
+#define TPC0_QM_CP_STS_RRDY_MASK 0x20000
+#define TPC0_QM_CP_STS_MRDY_SHIFT 18
+#define TPC0_QM_CP_STS_MRDY_MASK 0x40000
+#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19
+#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* TPC0_QM_CP_CURRENT_INST_LO */
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_CURRENT_INST_HI */
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_BARRIER_CFG */
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* TPC0_QM_CP_DBG_0 */
+#define TPC0_QM_CP_DBG_0_VAL_SHIFT 0
+#define TPC0_QM_CP_DBG_0_VAL_MASK 0xFF
+
+/* TPC0_QM_PQ_BUF_ADDR */
+#define TPC0_QM_PQ_BUF_ADDR_VAL_SHIFT 0
+#define TPC0_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_BUF_RDATA */
+#define TPC0_QM_PQ_BUF_RDATA_VAL_SHIFT 0
+#define TPC0_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_BUF_ADDR */
+#define TPC0_QM_CQ_BUF_ADDR_VAL_SHIFT 0
+#define TPC0_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_BUF_RDATA */
+#define TPC0_QM_CQ_BUF_RDATA_VAL_SHIFT 0
+#define TPC0_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
+
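
The *_SHIFT/*_MASK pairs above describe the individual bit fields packed into each 32-bit queue-manager register. A minimal sketch of how such pairs are typically consumed follows; it is illustrative only and not part of the patch, and the helper names and the sts0 value are assumptions rather than anything defined by this driver.

/*
 * Illustrative only -- not part of the patch. Shows how the
 * TPC0_QM_GLBL_STS0 / TPC0_QM_GLBL_CFG0 field macros defined above
 * are typically used to test and build register values.
 */
#include <linux/types.h>

static inline bool tpc0_qm_pqf_idle(u32 sts0)
{
	/* Isolate the PQF_IDLE bit (bit 0) with its mask. */
	return (sts0 & TPC0_QM_GLBL_STS0_PQF_IDLE_MASK) != 0;
}

static inline u32 tpc0_qm_glbl_cfg0_enable_all(void)
{
	/* Compose an enable word by OR-ing the per-engine enable bits. */
	return TPC0_QM_GLBL_CFG0_PQF_EN_MASK |
	       TPC0_QM_GLBL_CFG0_CQF_EN_MASK |
	       TPC0_QM_GLBL_CFG0_CP_EN_MASK |
	       TPC0_QM_GLBL_CFG0_DMA_EN_MASK;
}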
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
new file mode 100644
index 000000000000..7552d4ba61fe
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_REGS_H_
+#define ASIC_REG_TPC0_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC0_QM_GLBL_CFG0 0xE08000
+
+#define mmTPC0_QM_GLBL_CFG1 0xE08004
+
+#define mmTPC0_QM_GLBL_PROT 0xE08008
+
+#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08010
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08014
+
+#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08018
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS 0xE0801C
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS 0xE08020
+
+#define mmTPC0_QM_GLBL_STS0 0xE08024
+
+#define mmTPC0_QM_GLBL_STS1 0xE08028
+
+#define mmTPC0_QM_PQ_BASE_LO 0xE08060
+
+#define mmTPC0_QM_PQ_BASE_HI 0xE08064
+
+#define mmTPC0_QM_PQ_SIZE 0xE08068
+
+#define mmTPC0_QM_PQ_PI 0xE0806C
+
+#define mmTPC0_QM_PQ_CI 0xE08070
+
+#define mmTPC0_QM_PQ_CFG0 0xE08074
+
+#define mmTPC0_QM_PQ_CFG1 0xE08078
+
+#define mmTPC0_QM_PQ_ARUSER 0xE0807C
+
+#define mmTPC0_QM_PQ_PUSH0 0xE08080
+
+#define mmTPC0_QM_PQ_PUSH1 0xE08084
+
+#define mmTPC0_QM_PQ_PUSH2 0xE08088
+
+#define mmTPC0_QM_PQ_PUSH3 0xE0808C
+
+#define mmTPC0_QM_PQ_STS0 0xE08090
+
+#define mmTPC0_QM_PQ_STS1 0xE08094
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_EN 0xE080A0
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE080A4
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_SAT 0xE080A8
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_TOUT 0xE080AC
+
+#define mmTPC0_QM_CQ_CFG0 0xE080B0
+
+#define mmTPC0_QM_CQ_CFG1 0xE080B4
+
+#define mmTPC0_QM_CQ_ARUSER 0xE080B8
+
+#define mmTPC0_QM_CQ_PTR_LO 0xE080C0
+
+#define mmTPC0_QM_CQ_PTR_HI 0xE080C4
+
+#define mmTPC0_QM_CQ_TSIZE 0xE080C8
+
+#define mmTPC0_QM_CQ_CTL 0xE080CC
+
+#define mmTPC0_QM_CQ_PTR_LO_STS 0xE080D4
+
+#define mmTPC0_QM_CQ_PTR_HI_STS 0xE080D8
+
+#define mmTPC0_QM_CQ_TSIZE_STS 0xE080DC
+
+#define mmTPC0_QM_CQ_CTL_STS 0xE080E0
+
+#define mmTPC0_QM_CQ_STS0 0xE080E4
+
+#define mmTPC0_QM_CQ_STS1 0xE080E8
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_EN 0xE080F0
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE080F4
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_SAT 0xE080F8
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_TOUT 0xE080FC
+
+#define mmTPC0_QM_CQ_IFIFO_CNT 0xE08108
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO 0xE08120
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI 0xE08124
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO 0xE08128
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI 0xE0812C
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO 0xE08130
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI 0xE08134
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO 0xE08138
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI 0xE0813C
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET 0xE08140
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE08144
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE08148
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE0814C
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE08150
+
+#define mmTPC0_QM_CP_LDMA_COMMIT_OFFSET 0xE08154
+
+#define mmTPC0_QM_CP_FENCE0_RDATA 0xE08158
+
+#define mmTPC0_QM_CP_FENCE1_RDATA 0xE0815C
+
+#define mmTPC0_QM_CP_FENCE2_RDATA 0xE08160
+
+#define mmTPC0_QM_CP_FENCE3_RDATA 0xE08164
+
+#define mmTPC0_QM_CP_FENCE0_CNT 0xE08168
+
+#define mmTPC0_QM_CP_FENCE1_CNT 0xE0816C
+
+#define mmTPC0_QM_CP_FENCE2_CNT 0xE08170
+
+#define mmTPC0_QM_CP_FENCE3_CNT 0xE08174
+
+#define mmTPC0_QM_CP_STS 0xE08178
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO 0xE0817C
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI 0xE08180
+
+#define mmTPC0_QM_CP_BARRIER_CFG 0xE08184
+
+#define mmTPC0_QM_CP_DBG_0 0xE08188
+
+#define mmTPC0_QM_PQ_BUF_ADDR 0xE08300
+
+#define mmTPC0_QM_PQ_BUF_RDATA 0xE08304
+
+#define mmTPC0_QM_CQ_BUF_ADDR 0xE08308
+
+#define mmTPC0_QM_CQ_BUF_RDATA 0xE0830C
+
+#endif /* ASIC_REG_TPC0_QM_REGS_H_ */
+
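
The mmTPC0_QM_* values above are offsets into the device's register space rather than absolute addresses. The sketch below shows, under stated assumptions, how such an offset might be combined with an ioremap()'d base for an MMIO access; the regs pointer and both helper functions are hypothetical and do not reflect the driver's actual accessor macros.

/*
 * Illustrative only -- not part of the patch. "regs" is an assumed
 * ioremap()'d base of the configuration space; the real driver wraps
 * accesses in its own macros.
 */
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static u32 tpc0_qm_read_sts0(void __iomem *regs)
{
	/* Offset macros are added to the mapped base before the read. */
	return readl(regs + mmTPC0_QM_GLBL_STS0);
}

static void tpc0_qm_set_pq_base(void __iomem *regs, u64 pq_addr)
{
	/* The 64-bit queue base is split across the LO/HI register pair. */
	writel(lower_32_bits(pq_addr), regs + mmTPC0_QM_PQ_BASE_LO);
	writel(upper_32_bits(pq_addr), regs + mmTPC0_QM_PQ_BASE_HI);
}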
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
new file mode 100644
index 000000000000..19894413474a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_CFG_REGS_H_
+#define ASIC_REG_TPC1_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE46418
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE4641C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE46420
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE46424
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46428
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE4642C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE46430
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46434
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE46438
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE4643C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46440
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46444
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE46448
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE4644C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE46450
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46454
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46458
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE4645C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE46460
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE46464
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46468
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE4646C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE46470
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46474
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE46478
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE4647C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46480
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46484
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE46488
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE4648C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE46490
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE46494
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46498
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE4649C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE464A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE464A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE464A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE464AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE464B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE464B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE464B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE464BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE464C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE464C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE464C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE464CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE464D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE464D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE464E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE464FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE46500
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE46504
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE46508
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE4650C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE46510
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE46514
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE46518
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE4651C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE46520
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE46524
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE46528
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE4652C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE46530
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE46534
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE46538
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE4653C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE46540
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE46544
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE46548
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE4654C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE46550
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE46554
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46558
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE4655C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE46560
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46564
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE46568
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE4656C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46570
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46574
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE46578
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE4657C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE46580
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46584
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46588
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE4658C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE46590
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE46594
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46598
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE4659C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE465A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE465A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE465A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE465AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE465B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE465B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE465B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE465BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE465C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE465C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE465C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE465CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE465D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE465D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE465D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE465DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE465E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE465E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE465E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE465EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE465F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE465F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE465F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE465FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE46600
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE46604
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46608
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE4660C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE46610
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46614
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE46618
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE4661C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46620
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46624
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE46628
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE4662C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE46630
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE46634
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE46638
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE4663C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE46640
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE46644
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE46648
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE4664C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE46650
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE46654
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE46658
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE4665C
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46660
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE46664
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46668
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE4666C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46670
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE46674
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE46678
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE4667C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE46680
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE46684
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE46688
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE4668C
+
+#define mmTPC1_CFG_KERNEL_SRF_0 0xE46690
+
+#define mmTPC1_CFG_KERNEL_SRF_1 0xE46694
+
+#define mmTPC1_CFG_KERNEL_SRF_2 0xE46698
+
+#define mmTPC1_CFG_KERNEL_SRF_3 0xE4669C
+
+#define mmTPC1_CFG_KERNEL_SRF_4 0xE466A0
+
+#define mmTPC1_CFG_KERNEL_SRF_5 0xE466A4
+
+#define mmTPC1_CFG_KERNEL_SRF_6 0xE466A8
+
+#define mmTPC1_CFG_KERNEL_SRF_7 0xE466AC
+
+#define mmTPC1_CFG_KERNEL_SRF_8 0xE466B0
+
+#define mmTPC1_CFG_KERNEL_SRF_9 0xE466B4
+
+#define mmTPC1_CFG_KERNEL_SRF_10 0xE466B8
+
+#define mmTPC1_CFG_KERNEL_SRF_11 0xE466BC
+
+#define mmTPC1_CFG_KERNEL_SRF_12 0xE466C0
+
+#define mmTPC1_CFG_KERNEL_SRF_13 0xE466C4
+
+#define mmTPC1_CFG_KERNEL_SRF_14 0xE466C8
+
+#define mmTPC1_CFG_KERNEL_SRF_15 0xE466CC
+
+#define mmTPC1_CFG_KERNEL_SRF_16 0xE466D0
+
+#define mmTPC1_CFG_KERNEL_SRF_17 0xE466D4
+
+#define mmTPC1_CFG_KERNEL_SRF_18 0xE466D8
+
+#define mmTPC1_CFG_KERNEL_SRF_19 0xE466DC
+
+#define mmTPC1_CFG_KERNEL_SRF_20 0xE466E0
+
+#define mmTPC1_CFG_KERNEL_SRF_21 0xE466E4
+
+#define mmTPC1_CFG_KERNEL_SRF_22 0xE466E8
+
+#define mmTPC1_CFG_KERNEL_SRF_23 0xE466EC
+
+#define mmTPC1_CFG_KERNEL_SRF_24 0xE466F0
+
+#define mmTPC1_CFG_KERNEL_SRF_25 0xE466F4
+
+#define mmTPC1_CFG_KERNEL_SRF_26 0xE466F8
+
+#define mmTPC1_CFG_KERNEL_SRF_27 0xE466FC
+
+#define mmTPC1_CFG_KERNEL_SRF_28 0xE46700
+
+#define mmTPC1_CFG_KERNEL_SRF_29 0xE46704
+
+#define mmTPC1_CFG_KERNEL_SRF_30 0xE46708
+
+#define mmTPC1_CFG_KERNEL_SRF_31 0xE4670C
+
+#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE46710
+
+#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46714
+
+#define mmTPC1_CFG_RESERVED_DESC_END 0xE46738
+
+#define mmTPC1_CFG_ROUND_CSR 0xE467FC
+
+#define mmTPC1_CFG_TBUF_BASE_ADDR_LOW 0xE46800
+
+#define mmTPC1_CFG_TBUF_BASE_ADDR_HIGH 0xE46804
+
+#define mmTPC1_CFG_SEMAPHORE 0xE46808
+
+#define mmTPC1_CFG_VFLAGS 0xE4680C
+
+#define mmTPC1_CFG_SFLAGS 0xE46810
+
+#define mmTPC1_CFG_LFSR_POLYNOM 0xE46818
+
+#define mmTPC1_CFG_STATUS 0xE4681C
+
+#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46820
+
+#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46824
+
+#define mmTPC1_CFG_SM_BASE_ADDRESS_LOW 0xE46828
+
+#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4682C
+
+#define mmTPC1_CFG_TPC_CMD 0xE46830
+
+#define mmTPC1_CFG_TPC_EXECUTE 0xE46838
+
+#define mmTPC1_CFG_TPC_STALL 0xE4683C
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46840
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46844
+
+#define mmTPC1_CFG_MSS_CONFIG 0xE46854
+
+#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46858
+
+#define mmTPC1_CFG_TPC_INTR_MASK 0xE4685C
+
+#define mmTPC1_CFG_TSB_CONFIG 0xE46860
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04
+
+#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08
+
+#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE46A18
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A1C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A20
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE46A24
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A28
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A2C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE46A30
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A34
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A38
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE46A3C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A40
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A44
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE46A48
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A4C
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A50
+
+#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A54
+
+#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A58
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A5C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A60
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE46A64
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A68
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A6C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE46A70
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A74
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A78
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE46A7C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A80
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A84
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE46A88
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A8C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A90
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE46A94
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A98
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A9C
+
+#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46AA0
+
+#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46AA4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46AA8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46AAC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE46AB0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46AB4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46AB8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE46ABC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46AC0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46AC4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE46AC8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46ACC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46AD0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE46AD4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AD8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46ADC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE46AE0
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AE4
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE46AE8
+
+#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AEC
+
+#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AF0
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AF4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46AF8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE46AFC
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46B00
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46B04
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE46B08
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46B0C
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46B10
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE46B14
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46B18
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46B1C
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE46B20
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46B24
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46B28
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE46B2C
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46B30
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46B34
+
+#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46B38
+
+#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46B3C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46B40
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46B44
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE46B48
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46B4C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46B50
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE46B54
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B58
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B5C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE46B60
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B64
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B68
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE46B6C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B70
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B74
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE46B78
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B7C
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B80
+
+#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B84
+
+#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B88
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B8C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B90
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE46B94
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B98
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B9C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE46BA0
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46BA4
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46BA8
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE46BAC
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46BB0
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46BB4
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE46BB8
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46BBC
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46BC0
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE46BC4
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46BC8
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46BCC
+
+#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46BD0
+
+#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46BD4
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46BD8
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46BDC
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE46BE0
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46BE4
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46BE8
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE46BEC
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46BF0
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46BF4
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE46BF8
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46BFC
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46C00
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE46C04
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46C08
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46C0C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE46C10
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46C14
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46C18
+
+#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46C1C
+
+#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46C20
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46C24
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46C28
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE46C2C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46C30
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46C34
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE46C38
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46C3C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46C40
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE46C44
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46C48
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46C4C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE46C50
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46C54
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46C58
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE46C5C
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46C60
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46C64
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46C68
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46C6C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46C70
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46C74
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46C78
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 0xE46C7C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46C80
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46C84
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46C88
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46C8C
+
+#define mmTPC1_CFG_QM_SRF_0 0xE46C90
+
+#define mmTPC1_CFG_QM_SRF_1 0xE46C94
+
+#define mmTPC1_CFG_QM_SRF_2 0xE46C98
+
+#define mmTPC1_CFG_QM_SRF_3 0xE46C9C
+
+#define mmTPC1_CFG_QM_SRF_4 0xE46CA0
+
+#define mmTPC1_CFG_QM_SRF_5 0xE46CA4
+
+#define mmTPC1_CFG_QM_SRF_6 0xE46CA8
+
+#define mmTPC1_CFG_QM_SRF_7 0xE46CAC
+
+#define mmTPC1_CFG_QM_SRF_8 0xE46CB0
+
+#define mmTPC1_CFG_QM_SRF_9 0xE46CB4
+
+#define mmTPC1_CFG_QM_SRF_10 0xE46CB8
+
+#define mmTPC1_CFG_QM_SRF_11 0xE46CBC
+
+#define mmTPC1_CFG_QM_SRF_12 0xE46CC0
+
+#define mmTPC1_CFG_QM_SRF_13 0xE46CC4
+
+#define mmTPC1_CFG_QM_SRF_14 0xE46CC8
+
+#define mmTPC1_CFG_QM_SRF_15 0xE46CCC
+
+#define mmTPC1_CFG_QM_SRF_16 0xE46CD0
+
+#define mmTPC1_CFG_QM_SRF_17 0xE46CD4
+
+#define mmTPC1_CFG_QM_SRF_18 0xE46CD8
+
+#define mmTPC1_CFG_QM_SRF_19 0xE46CDC
+
+#define mmTPC1_CFG_QM_SRF_20 0xE46CE0
+
+#define mmTPC1_CFG_QM_SRF_21 0xE46CE4
+
+#define mmTPC1_CFG_QM_SRF_22 0xE46CE8
+
+#define mmTPC1_CFG_QM_SRF_23 0xE46CEC
+
+#define mmTPC1_CFG_QM_SRF_24 0xE46CF0
+
+#define mmTPC1_CFG_QM_SRF_25 0xE46CF4
+
+#define mmTPC1_CFG_QM_SRF_26 0xE46CF8
+
+#define mmTPC1_CFG_QM_SRF_27 0xE46CFC
+
+#define mmTPC1_CFG_QM_SRF_28 0xE46D00
+
+#define mmTPC1_CFG_QM_SRF_29 0xE46D04
+
+#define mmTPC1_CFG_QM_SRF_30 0xE46D08
+
+#define mmTPC1_CFG_QM_SRF_31 0xE46D0C
+
+#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46D10
+
+#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D14
+
+#define mmTPC1_CFG_ARUSER 0xE46D18
+
+#define mmTPC1_CFG_AWUSER 0xE46D1C
+
+#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE46E00
+
+#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE46E04
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE46E08
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE46E0C
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE46E10
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE46E14
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE46E18
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE46E1C
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE46E20
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE46E24
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE46E28
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE46E2C
+
+#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
new file mode 100644
index 000000000000..9099ebd7ab23
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_CMDQ_REGS_H_
+#define ASIC_REG_TPC1_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC1_CMDQ_GLBL_CFG0 0xE49000
+
+#define mmTPC1_CMDQ_GLBL_CFG1 0xE49004
+
+#define mmTPC1_CMDQ_GLBL_PROT 0xE49008
+
+#define mmTPC1_CMDQ_GLBL_ERR_CFG 0xE4900C
+
+#define mmTPC1_CMDQ_GLBL_ERR_ADDR_LO 0xE49010
+
+#define mmTPC1_CMDQ_GLBL_ERR_ADDR_HI 0xE49014
+
+#define mmTPC1_CMDQ_GLBL_ERR_WDATA 0xE49018
+
+#define mmTPC1_CMDQ_GLBL_SECURE_PROPS 0xE4901C
+
+#define mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS 0xE49020
+
+#define mmTPC1_CMDQ_GLBL_STS0 0xE49024
+
+#define mmTPC1_CMDQ_GLBL_STS1 0xE49028
+
+#define mmTPC1_CMDQ_CQ_CFG0 0xE490B0
+
+#define mmTPC1_CMDQ_CQ_CFG1 0xE490B4
+
+#define mmTPC1_CMDQ_CQ_ARUSER 0xE490B8
+
+#define mmTPC1_CMDQ_CQ_PTR_LO 0xE490C0
+
+#define mmTPC1_CMDQ_CQ_PTR_HI 0xE490C4
+
+#define mmTPC1_CMDQ_CQ_TSIZE 0xE490C8
+
+#define mmTPC1_CMDQ_CQ_CTL 0xE490CC
+
+#define mmTPC1_CMDQ_CQ_PTR_LO_STS 0xE490D4
+
+#define mmTPC1_CMDQ_CQ_PTR_HI_STS 0xE490D8
+
+#define mmTPC1_CMDQ_CQ_TSIZE_STS 0xE490DC
+
+#define mmTPC1_CMDQ_CQ_CTL_STS 0xE490E0
+
+#define mmTPC1_CMDQ_CQ_STS0 0xE490E4
+
+#define mmTPC1_CMDQ_CQ_STS1 0xE490E8
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN 0xE490F0
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE490F4
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT 0xE490F8
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE490FC
+
+#define mmTPC1_CMDQ_CQ_IFIFO_CNT 0xE49108
+
+#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE49120
+
+#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE49124
+
+#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE49128
+
+#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE4912C
+
+#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE49130
+
+#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE49134
+
+#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE49138
+
+#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE4913C
+
+#define mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE49140
+
+#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE49144
+
+#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE49148
+
+#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE4914C
+
+#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE49150
+
+#define mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE49154
+
+#define mmTPC1_CMDQ_CP_FENCE0_RDATA 0xE49158
+
+#define mmTPC1_CMDQ_CP_FENCE1_RDATA 0xE4915C
+
+#define mmTPC1_CMDQ_CP_FENCE2_RDATA 0xE49160
+
+#define mmTPC1_CMDQ_CP_FENCE3_RDATA 0xE49164
+
+#define mmTPC1_CMDQ_CP_FENCE0_CNT 0xE49168
+
+#define mmTPC1_CMDQ_CP_FENCE1_CNT 0xE4916C
+
+#define mmTPC1_CMDQ_CP_FENCE2_CNT 0xE49170
+
+#define mmTPC1_CMDQ_CP_FENCE3_CNT 0xE49174
+
+#define mmTPC1_CMDQ_CP_STS 0xE49178
+
+#define mmTPC1_CMDQ_CP_CURRENT_INST_LO 0xE4917C
+
+#define mmTPC1_CMDQ_CP_CURRENT_INST_HI 0xE49180
+
+#define mmTPC1_CMDQ_CP_BARRIER_CFG 0xE49184
+
+#define mmTPC1_CMDQ_CP_DBG_0 0xE49188
+
+#define mmTPC1_CMDQ_CQ_BUF_ADDR 0xE49308
+
+#define mmTPC1_CMDQ_CQ_BUF_RDATA 0xE4930C
+
+#endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
new file mode 100644
index 000000000000..bc8b9a10391f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_QM_REGS_H_
+#define ASIC_REG_TPC1_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC1_QM_GLBL_CFG0 0xE48000
+
+#define mmTPC1_QM_GLBL_CFG1 0xE48004
+
+#define mmTPC1_QM_GLBL_PROT 0xE48008
+
+#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48010
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48014
+
+#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48018
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS 0xE4801C
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS 0xE48020
+
+#define mmTPC1_QM_GLBL_STS0 0xE48024
+
+#define mmTPC1_QM_GLBL_STS1 0xE48028
+
+#define mmTPC1_QM_PQ_BASE_LO 0xE48060
+
+#define mmTPC1_QM_PQ_BASE_HI 0xE48064
+
+#define mmTPC1_QM_PQ_SIZE 0xE48068
+
+#define mmTPC1_QM_PQ_PI 0xE4806C
+
+#define mmTPC1_QM_PQ_CI 0xE48070
+
+#define mmTPC1_QM_PQ_CFG0 0xE48074
+
+#define mmTPC1_QM_PQ_CFG1 0xE48078
+
+#define mmTPC1_QM_PQ_ARUSER 0xE4807C
+
+#define mmTPC1_QM_PQ_PUSH0 0xE48080
+
+#define mmTPC1_QM_PQ_PUSH1 0xE48084
+
+#define mmTPC1_QM_PQ_PUSH2 0xE48088
+
+#define mmTPC1_QM_PQ_PUSH3 0xE4808C
+
+#define mmTPC1_QM_PQ_STS0 0xE48090
+
+#define mmTPC1_QM_PQ_STS1 0xE48094
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_EN 0xE480A0
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE480A4
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_SAT 0xE480A8
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_TOUT 0xE480AC
+
+#define mmTPC1_QM_CQ_CFG0 0xE480B0
+
+#define mmTPC1_QM_CQ_CFG1 0xE480B4
+
+#define mmTPC1_QM_CQ_ARUSER 0xE480B8
+
+#define mmTPC1_QM_CQ_PTR_LO 0xE480C0
+
+#define mmTPC1_QM_CQ_PTR_HI 0xE480C4
+
+#define mmTPC1_QM_CQ_TSIZE 0xE480C8
+
+#define mmTPC1_QM_CQ_CTL 0xE480CC
+
+#define mmTPC1_QM_CQ_PTR_LO_STS 0xE480D4
+
+#define mmTPC1_QM_CQ_PTR_HI_STS 0xE480D8
+
+#define mmTPC1_QM_CQ_TSIZE_STS 0xE480DC
+
+#define mmTPC1_QM_CQ_CTL_STS 0xE480E0
+
+#define mmTPC1_QM_CQ_STS0 0xE480E4
+
+#define mmTPC1_QM_CQ_STS1 0xE480E8
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_EN 0xE480F0
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE480F4
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_SAT 0xE480F8
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_TOUT 0xE480FC
+
+#define mmTPC1_QM_CQ_IFIFO_CNT 0xE48108
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO 0xE48120
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI 0xE48124
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO 0xE48128
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI 0xE4812C
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO 0xE48130
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI 0xE48134
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO 0xE48138
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI 0xE4813C
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET 0xE48140
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE48144
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE48148
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE4814C
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE48150
+
+#define mmTPC1_QM_CP_LDMA_COMMIT_OFFSET 0xE48154
+
+#define mmTPC1_QM_CP_FENCE0_RDATA 0xE48158
+
+#define mmTPC1_QM_CP_FENCE1_RDATA 0xE4815C
+
+#define mmTPC1_QM_CP_FENCE2_RDATA 0xE48160
+
+#define mmTPC1_QM_CP_FENCE3_RDATA 0xE48164
+
+#define mmTPC1_QM_CP_FENCE0_CNT 0xE48168
+
+#define mmTPC1_QM_CP_FENCE1_CNT 0xE4816C
+
+#define mmTPC1_QM_CP_FENCE2_CNT 0xE48170
+
+#define mmTPC1_QM_CP_FENCE3_CNT 0xE48174
+
+#define mmTPC1_QM_CP_STS 0xE48178
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO 0xE4817C
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI 0xE48180
+
+#define mmTPC1_QM_CP_BARRIER_CFG 0xE48184
+
+#define mmTPC1_QM_CP_DBG_0 0xE48188
+
+#define mmTPC1_QM_PQ_BUF_ADDR 0xE48300
+
+#define mmTPC1_QM_PQ_BUF_RDATA 0xE48304
+
+#define mmTPC1_QM_CQ_BUF_ADDR 0xE48308
+
+#define mmTPC1_QM_CQ_BUF_RDATA 0xE4830C
+
+#endif /* ASIC_REG_TPC1_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
new file mode 100644
index 000000000000..ae267f8f457e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_RTR_REGS_H_
+#define ASIC_REG_TPC1_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC1_RTR_HBW_RD_RQ_E_ARB 0xE40100
+
+#define mmTPC1_RTR_HBW_RD_RQ_W_ARB 0xE40104
+
+#define mmTPC1_RTR_HBW_RD_RQ_N_ARB 0xE40108
+
+#define mmTPC1_RTR_HBW_RD_RQ_S_ARB 0xE4010C
+
+#define mmTPC1_RTR_HBW_RD_RQ_L_ARB 0xE40110
+
+#define mmTPC1_RTR_HBW_E_ARB_MAX 0xE40120
+
+#define mmTPC1_RTR_HBW_W_ARB_MAX 0xE40124
+
+#define mmTPC1_RTR_HBW_N_ARB_MAX 0xE40128
+
+#define mmTPC1_RTR_HBW_S_ARB_MAX 0xE4012C
+
+#define mmTPC1_RTR_HBW_L_ARB_MAX 0xE40130
+
+#define mmTPC1_RTR_HBW_RD_RS_E_ARB 0xE40140
+
+#define mmTPC1_RTR_HBW_RD_RS_W_ARB 0xE40144
+
+#define mmTPC1_RTR_HBW_RD_RS_N_ARB 0xE40148
+
+#define mmTPC1_RTR_HBW_RD_RS_S_ARB 0xE4014C
+
+#define mmTPC1_RTR_HBW_RD_RS_L_ARB 0xE40150
+
+#define mmTPC1_RTR_HBW_WR_RQ_E_ARB 0xE40170
+
+#define mmTPC1_RTR_HBW_WR_RQ_W_ARB 0xE40174
+
+#define mmTPC1_RTR_HBW_WR_RQ_N_ARB 0xE40178
+
+#define mmTPC1_RTR_HBW_WR_RQ_S_ARB 0xE4017C
+
+#define mmTPC1_RTR_HBW_WR_RQ_L_ARB 0xE40180
+
+#define mmTPC1_RTR_HBW_WR_RS_E_ARB 0xE40190
+
+#define mmTPC1_RTR_HBW_WR_RS_W_ARB 0xE40194
+
+#define mmTPC1_RTR_HBW_WR_RS_N_ARB 0xE40198
+
+#define mmTPC1_RTR_HBW_WR_RS_S_ARB 0xE4019C
+
+#define mmTPC1_RTR_HBW_WR_RS_L_ARB 0xE401A0
+
+#define mmTPC1_RTR_LBW_RD_RQ_E_ARB 0xE40200
+
+#define mmTPC1_RTR_LBW_RD_RQ_W_ARB 0xE40204
+
+#define mmTPC1_RTR_LBW_RD_RQ_N_ARB 0xE40208
+
+#define mmTPC1_RTR_LBW_RD_RQ_S_ARB 0xE4020C
+
+#define mmTPC1_RTR_LBW_RD_RQ_L_ARB 0xE40210
+
+#define mmTPC1_RTR_LBW_E_ARB_MAX 0xE40220
+
+#define mmTPC1_RTR_LBW_W_ARB_MAX 0xE40224
+
+#define mmTPC1_RTR_LBW_N_ARB_MAX 0xE40228
+
+#define mmTPC1_RTR_LBW_S_ARB_MAX 0xE4022C
+
+#define mmTPC1_RTR_LBW_L_ARB_MAX 0xE40230
+
+#define mmTPC1_RTR_LBW_RD_RS_E_ARB 0xE40250
+
+#define mmTPC1_RTR_LBW_RD_RS_W_ARB 0xE40254
+
+#define mmTPC1_RTR_LBW_RD_RS_N_ARB 0xE40258
+
+#define mmTPC1_RTR_LBW_RD_RS_S_ARB 0xE4025C
+
+#define mmTPC1_RTR_LBW_RD_RS_L_ARB 0xE40260
+
+#define mmTPC1_RTR_LBW_WR_RQ_E_ARB 0xE40270
+
+#define mmTPC1_RTR_LBW_WR_RQ_W_ARB 0xE40274
+
+#define mmTPC1_RTR_LBW_WR_RQ_N_ARB 0xE40278
+
+#define mmTPC1_RTR_LBW_WR_RQ_S_ARB 0xE4027C
+
+#define mmTPC1_RTR_LBW_WR_RQ_L_ARB 0xE40280
+
+#define mmTPC1_RTR_LBW_WR_RS_E_ARB 0xE40290
+
+#define mmTPC1_RTR_LBW_WR_RS_W_ARB 0xE40294
+
+#define mmTPC1_RTR_LBW_WR_RS_N_ARB 0xE40298
+
+#define mmTPC1_RTR_LBW_WR_RS_S_ARB 0xE4029C
+
+#define mmTPC1_RTR_LBW_WR_RS_L_ARB 0xE402A0
+
+#define mmTPC1_RTR_DBG_E_ARB 0xE40300
+
+#define mmTPC1_RTR_DBG_W_ARB 0xE40304
+
+#define mmTPC1_RTR_DBG_N_ARB 0xE40308
+
+#define mmTPC1_RTR_DBG_S_ARB 0xE4030C
+
+#define mmTPC1_RTR_DBG_L_ARB 0xE40310
+
+#define mmTPC1_RTR_DBG_E_ARB_MAX 0xE40320
+
+#define mmTPC1_RTR_DBG_W_ARB_MAX 0xE40324
+
+#define mmTPC1_RTR_DBG_N_ARB_MAX 0xE40328
+
+#define mmTPC1_RTR_DBG_S_ARB_MAX 0xE4032C
+
+#define mmTPC1_RTR_DBG_L_ARB_MAX 0xE40330
+
+#define mmTPC1_RTR_SPLIT_COEF_0 0xE40400
+
+#define mmTPC1_RTR_SPLIT_COEF_1 0xE40404
+
+#define mmTPC1_RTR_SPLIT_COEF_2 0xE40408
+
+#define mmTPC1_RTR_SPLIT_COEF_3 0xE4040C
+
+#define mmTPC1_RTR_SPLIT_COEF_4 0xE40410
+
+#define mmTPC1_RTR_SPLIT_COEF_5 0xE40414
+
+#define mmTPC1_RTR_SPLIT_COEF_6 0xE40418
+
+#define mmTPC1_RTR_SPLIT_COEF_7 0xE4041C
+
+#define mmTPC1_RTR_SPLIT_COEF_8 0xE40420
+
+#define mmTPC1_RTR_SPLIT_COEF_9 0xE40424
+
+#define mmTPC1_RTR_SPLIT_CFG 0xE40440
+
+#define mmTPC1_RTR_SPLIT_RD_SAT 0xE40444
+
+#define mmTPC1_RTR_SPLIT_RD_RST_TOKEN 0xE40448
+
+#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_0 0xE4044C
+
+#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_1 0xE40450
+
+#define mmTPC1_RTR_SPLIT_WR_SAT 0xE40454
+
+#define mmTPC1_RTR_WPLIT_WR_TST_TOLEN 0xE40458
+
+#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_0 0xE4045C
+
+#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_1 0xE40460
+
+#define mmTPC1_RTR_HBW_RANGE_HIT 0xE40470
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_0 0xE40480
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_1 0xE40484
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_2 0xE40488
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_3 0xE4048C
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_4 0xE40490
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_5 0xE40494
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_6 0xE40498
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_7 0xE4049C
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_0 0xE404A0
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_1 0xE404A4
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_2 0xE404A8
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_3 0xE404AC
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_4 0xE404B0
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_5 0xE404B4
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_6 0xE404B8
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_7 0xE404BC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_0 0xE404C0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_1 0xE404C4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_2 0xE404C8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_3 0xE404CC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_4 0xE404D0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_5 0xE404D4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_6 0xE404D8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_7 0xE404DC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_0 0xE404E0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_1 0xE404E4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_2 0xE404E8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_3 0xE404EC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_4 0xE404F0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_5 0xE404F4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_6 0xE404F8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_7 0xE404FC
+
+#define mmTPC1_RTR_LBW_RANGE_HIT 0xE40500
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_0 0xE40510
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_1 0xE40514
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_2 0xE40518
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_3 0xE4051C
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_4 0xE40520
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_5 0xE40524
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_6 0xE40528
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_7 0xE4052C
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_8 0xE40530
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_9 0xE40534
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_10 0xE40538
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_11 0xE4053C
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_12 0xE40540
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_13 0xE40544
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_14 0xE40548
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_15 0xE4054C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_0 0xE40550
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_1 0xE40554
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_2 0xE40558
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_3 0xE4055C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_4 0xE40560
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_5 0xE40564
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_6 0xE40568
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_7 0xE4056C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_8 0xE40570
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_9 0xE40574
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_10 0xE40578
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_11 0xE4057C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_12 0xE40580
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_13 0xE40584
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_14 0xE40588
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_15 0xE4058C
+
+#define mmTPC1_RTR_RGLTR 0xE40590
+
+#define mmTPC1_RTR_RGLTR_WR_RESULT 0xE40594
+
+#define mmTPC1_RTR_RGLTR_RD_RESULT 0xE40598
+
+#define mmTPC1_RTR_SCRAMB_EN 0xE40600
+
+#define mmTPC1_RTR_NON_LIN_SCRAMB 0xE40604
+
+#endif /* ASIC_REG_TPC1_RTR_REGS_H_ */
+
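[Editor's note, not part of the patch] The headers in this patch repeat the same register map per TPC instance at a fixed stride: the TPC1_RTR block above lives at 0xE40000-0xE40604, while the TPC2_RTR block later in this patch repeats identical offsets at 0xE80000-0xE80604, i.e. a 0x40000 step between instances. A minimal sketch of exploiting that regularity follows; the helper name and the stride constant are assumptions drawn only from the addresses in these headers, not an API of the habanalabs driver.

/* Illustrative sketch, not part of the patch. Assumes only the 0x40000
 * per-TPC stride visible in these headers (mmTPC1_RTR_SCRAMB_EN at
 * 0xE40600 vs. mmTPC2_RTR_SCRAMB_EN at 0xE80600); tpc_rtr_reg() is a
 * hypothetical helper, not a driver function.
 */
#include <stdint.h>

#define TPC_INSTANCE_STRIDE 0x40000u

/* Map a TPC1_RTR register offset onto TPC instance n (n >= 1). */
static inline uint32_t tpc_rtr_reg(uint32_t tpc1_reg, unsigned int n)
{
	return tpc1_reg + (uint32_t)(n - 1) * TPC_INSTANCE_STRIDE;
}

/* e.g. tpc_rtr_reg(mmTPC1_RTR_SCRAMB_EN, 2) == 0xE80600 == mmTPC2_RTR_SCRAMB_EN */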
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
new file mode 100644
index 000000000000..9c33fc039036
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_CFG_REGS_H_
+#define ASIC_REG_TPC2_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE86418
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE8641C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE86420
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE86424
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86428
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE8642C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE86430
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86434
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE86438
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE8643C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86440
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86444
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE86448
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE8644C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE86450
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86454
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86458
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE8645C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE86460
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE86464
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86468
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE8646C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE86470
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86474
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE86478
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE8647C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86480
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86484
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE86488
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE8648C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE86490
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE86494
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86498
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE8649C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE864A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE864A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE864A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE864AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE864B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE864B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE864B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE864BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE864C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE864C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE864C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE864CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE864D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE864D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE864E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE864FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE86500
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE86504
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE86508
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE8650C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE86510
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE86514
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE86518
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE8651C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE86520
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE86524
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE86528
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE8652C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE86530
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE86534
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE86538
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE8653C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE86540
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE86544
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE86548
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE8654C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE86550
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE86554
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86558
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE8655C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE86560
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86564
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE86568
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE8656C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86570
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86574
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE86578
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE8657C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE86580
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86584
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86588
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE8658C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE86590
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE86594
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86598
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE8659C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE865A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE865A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE865A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE865AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE865B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE865B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE865B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE865BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE865C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE865C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE865C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE865CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE865D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE865D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE865D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE865DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE865E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE865E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE865E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE865EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE865F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE865F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE865F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE865FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE86600
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE86604
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86608
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE8660C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE86610
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86614
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE86618
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE8661C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86620
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86624
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE86628
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE8662C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE86630
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE86634
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE86638
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE8663C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE86640
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE86644
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE86648
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE8664C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE86650
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE86654
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE86658
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE8665C
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86660
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE86664
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86668
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE8666C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86670
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE86674
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE86678
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE8667C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE86680
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE86684
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE86688
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE8668C
+
+#define mmTPC2_CFG_KERNEL_SRF_0 0xE86690
+
+#define mmTPC2_CFG_KERNEL_SRF_1 0xE86694
+
+#define mmTPC2_CFG_KERNEL_SRF_2 0xE86698
+
+#define mmTPC2_CFG_KERNEL_SRF_3 0xE8669C
+
+#define mmTPC2_CFG_KERNEL_SRF_4 0xE866A0
+
+#define mmTPC2_CFG_KERNEL_SRF_5 0xE866A4
+
+#define mmTPC2_CFG_KERNEL_SRF_6 0xE866A8
+
+#define mmTPC2_CFG_KERNEL_SRF_7 0xE866AC
+
+#define mmTPC2_CFG_KERNEL_SRF_8 0xE866B0
+
+#define mmTPC2_CFG_KERNEL_SRF_9 0xE866B4
+
+#define mmTPC2_CFG_KERNEL_SRF_10 0xE866B8
+
+#define mmTPC2_CFG_KERNEL_SRF_11 0xE866BC
+
+#define mmTPC2_CFG_KERNEL_SRF_12 0xE866C0
+
+#define mmTPC2_CFG_KERNEL_SRF_13 0xE866C4
+
+#define mmTPC2_CFG_KERNEL_SRF_14 0xE866C8
+
+#define mmTPC2_CFG_KERNEL_SRF_15 0xE866CC
+
+#define mmTPC2_CFG_KERNEL_SRF_16 0xE866D0
+
+#define mmTPC2_CFG_KERNEL_SRF_17 0xE866D4
+
+#define mmTPC2_CFG_KERNEL_SRF_18 0xE866D8
+
+#define mmTPC2_CFG_KERNEL_SRF_19 0xE866DC
+
+#define mmTPC2_CFG_KERNEL_SRF_20 0xE866E0
+
+#define mmTPC2_CFG_KERNEL_SRF_21 0xE866E4
+
+#define mmTPC2_CFG_KERNEL_SRF_22 0xE866E8
+
+#define mmTPC2_CFG_KERNEL_SRF_23 0xE866EC
+
+#define mmTPC2_CFG_KERNEL_SRF_24 0xE866F0
+
+#define mmTPC2_CFG_KERNEL_SRF_25 0xE866F4
+
+#define mmTPC2_CFG_KERNEL_SRF_26 0xE866F8
+
+#define mmTPC2_CFG_KERNEL_SRF_27 0xE866FC
+
+#define mmTPC2_CFG_KERNEL_SRF_28 0xE86700
+
+#define mmTPC2_CFG_KERNEL_SRF_29 0xE86704
+
+#define mmTPC2_CFG_KERNEL_SRF_30 0xE86708
+
+#define mmTPC2_CFG_KERNEL_SRF_31 0xE8670C
+
+#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE86710
+
+#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86714
+
+#define mmTPC2_CFG_RESERVED_DESC_END 0xE86738
+
+#define mmTPC2_CFG_ROUND_CSR 0xE867FC
+
+#define mmTPC2_CFG_TBUF_BASE_ADDR_LOW 0xE86800
+
+#define mmTPC2_CFG_TBUF_BASE_ADDR_HIGH 0xE86804
+
+#define mmTPC2_CFG_SEMAPHORE 0xE86808
+
+#define mmTPC2_CFG_VFLAGS 0xE8680C
+
+#define mmTPC2_CFG_SFLAGS 0xE86810
+
+#define mmTPC2_CFG_LFSR_POLYNOM 0xE86818
+
+#define mmTPC2_CFG_STATUS 0xE8681C
+
+#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86820
+
+#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86824
+
+#define mmTPC2_CFG_SM_BASE_ADDRESS_LOW 0xE86828
+
+#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8682C
+
+#define mmTPC2_CFG_TPC_CMD 0xE86830
+
+#define mmTPC2_CFG_TPC_EXECUTE 0xE86838
+
+#define mmTPC2_CFG_TPC_STALL 0xE8683C
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86840
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86844
+
+#define mmTPC2_CFG_MSS_CONFIG 0xE86854
+
+#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86858
+
+#define mmTPC2_CFG_TPC_INTR_MASK 0xE8685C
+
+#define mmTPC2_CFG_TSB_CONFIG 0xE86860
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04
+
+#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08
+
+#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE86A18
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A1C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A20
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE86A24
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A28
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A2C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE86A30
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A34
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A38
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE86A3C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A40
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A44
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE86A48
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A4C
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A50
+
+#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A54
+
+#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A58
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A5C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A60
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE86A64
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A68
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A6C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE86A70
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A74
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A78
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE86A7C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A80
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A84
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE86A88
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A8C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A90
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE86A94
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A98
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A9C
+
+#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86AA0
+
+#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86AA4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86AA8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86AAC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE86AB0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86AB4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86AB8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE86ABC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86AC0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86AC4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE86AC8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86ACC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86AD0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE86AD4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AD8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86ADC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE86AE0
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AE4
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE86AE8
+
+#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AEC
+
+#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AF0
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AF4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86AF8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE86AFC
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86B00
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86B04
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE86B08
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86B0C
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86B10
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE86B14
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86B18
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86B1C
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE86B20
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86B24
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86B28
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE86B2C
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86B30
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86B34
+
+#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86B38
+
+#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86B3C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86B40
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86B44
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE86B48
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86B4C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86B50
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE86B54
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B58
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B5C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE86B60
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B64
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B68
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE86B6C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B70
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B74
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE86B78
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B7C
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B80
+
+#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B84
+
+#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B88
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B8C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B90
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE86B94
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B98
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B9C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE86BA0
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86BA4
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86BA8
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE86BAC
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86BB0
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86BB4
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE86BB8
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86BBC
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86BC0
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE86BC4
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86BC8
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86BCC
+
+#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86BD0
+
+#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86BD4
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86BD8
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86BDC
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE86BE0
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86BE4
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86BE8
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE86BEC
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86BF0
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86BF4
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE86BF8
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86BFC
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86C00
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE86C04
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86C08
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86C0C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE86C10
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86C14
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86C18
+
+#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86C1C
+
+#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86C20
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86C24
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86C28
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE86C2C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86C30
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86C34
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE86C38
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86C3C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86C40
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE86C44
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86C48
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86C4C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE86C50
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86C54
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86C58
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE86C5C
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86C60
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86C64
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86C68
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86C6C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86C70
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86C74
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86C78
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 0xE86C7C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86C80
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86C84
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86C88
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86C8C
+
+#define mmTPC2_CFG_QM_SRF_0 0xE86C90
+
+#define mmTPC2_CFG_QM_SRF_1 0xE86C94
+
+#define mmTPC2_CFG_QM_SRF_2 0xE86C98
+
+#define mmTPC2_CFG_QM_SRF_3 0xE86C9C
+
+#define mmTPC2_CFG_QM_SRF_4 0xE86CA0
+
+#define mmTPC2_CFG_QM_SRF_5 0xE86CA4
+
+#define mmTPC2_CFG_QM_SRF_6 0xE86CA8
+
+#define mmTPC2_CFG_QM_SRF_7 0xE86CAC
+
+#define mmTPC2_CFG_QM_SRF_8 0xE86CB0
+
+#define mmTPC2_CFG_QM_SRF_9 0xE86CB4
+
+#define mmTPC2_CFG_QM_SRF_10 0xE86CB8
+
+#define mmTPC2_CFG_QM_SRF_11 0xE86CBC
+
+#define mmTPC2_CFG_QM_SRF_12 0xE86CC0
+
+#define mmTPC2_CFG_QM_SRF_13 0xE86CC4
+
+#define mmTPC2_CFG_QM_SRF_14 0xE86CC8
+
+#define mmTPC2_CFG_QM_SRF_15 0xE86CCC
+
+#define mmTPC2_CFG_QM_SRF_16 0xE86CD0
+
+#define mmTPC2_CFG_QM_SRF_17 0xE86CD4
+
+#define mmTPC2_CFG_QM_SRF_18 0xE86CD8
+
+#define mmTPC2_CFG_QM_SRF_19 0xE86CDC
+
+#define mmTPC2_CFG_QM_SRF_20 0xE86CE0
+
+#define mmTPC2_CFG_QM_SRF_21 0xE86CE4
+
+#define mmTPC2_CFG_QM_SRF_22 0xE86CE8
+
+#define mmTPC2_CFG_QM_SRF_23 0xE86CEC
+
+#define mmTPC2_CFG_QM_SRF_24 0xE86CF0
+
+#define mmTPC2_CFG_QM_SRF_25 0xE86CF4
+
+#define mmTPC2_CFG_QM_SRF_26 0xE86CF8
+
+#define mmTPC2_CFG_QM_SRF_27 0xE86CFC
+
+#define mmTPC2_CFG_QM_SRF_28 0xE86D00
+
+#define mmTPC2_CFG_QM_SRF_29 0xE86D04
+
+#define mmTPC2_CFG_QM_SRF_30 0xE86D08
+
+#define mmTPC2_CFG_QM_SRF_31 0xE86D0C
+
+#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86D10
+
+#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D14
+
+#define mmTPC2_CFG_ARUSER 0xE86D18
+
+#define mmTPC2_CFG_AWUSER 0xE86D1C
+
+#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE86E00
+
+#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE86E04
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE86E08
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE86E0C
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE86E10
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE86E14
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE86E18
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE86E1C
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE86E20
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE86E24
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE86E28
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE86E2C
+
+#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
new file mode 100644
index 000000000000..7a643887d6e1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_CMDQ_REGS_H_
+#define ASIC_REG_TPC2_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC2_CMDQ_GLBL_CFG0 0xE89000
+
+#define mmTPC2_CMDQ_GLBL_CFG1 0xE89004
+
+#define mmTPC2_CMDQ_GLBL_PROT 0xE89008
+
+#define mmTPC2_CMDQ_GLBL_ERR_CFG 0xE8900C
+
+#define mmTPC2_CMDQ_GLBL_ERR_ADDR_LO 0xE89010
+
+#define mmTPC2_CMDQ_GLBL_ERR_ADDR_HI 0xE89014
+
+#define mmTPC2_CMDQ_GLBL_ERR_WDATA 0xE89018
+
+#define mmTPC2_CMDQ_GLBL_SECURE_PROPS 0xE8901C
+
+#define mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS 0xE89020
+
+#define mmTPC2_CMDQ_GLBL_STS0 0xE89024
+
+#define mmTPC2_CMDQ_GLBL_STS1 0xE89028
+
+#define mmTPC2_CMDQ_CQ_CFG0 0xE890B0
+
+#define mmTPC2_CMDQ_CQ_CFG1 0xE890B4
+
+#define mmTPC2_CMDQ_CQ_ARUSER 0xE890B8
+
+#define mmTPC2_CMDQ_CQ_PTR_LO 0xE890C0
+
+#define mmTPC2_CMDQ_CQ_PTR_HI 0xE890C4
+
+#define mmTPC2_CMDQ_CQ_TSIZE 0xE890C8
+
+#define mmTPC2_CMDQ_CQ_CTL 0xE890CC
+
+#define mmTPC2_CMDQ_CQ_PTR_LO_STS 0xE890D4
+
+#define mmTPC2_CMDQ_CQ_PTR_HI_STS 0xE890D8
+
+#define mmTPC2_CMDQ_CQ_TSIZE_STS 0xE890DC
+
+#define mmTPC2_CMDQ_CQ_CTL_STS 0xE890E0
+
+#define mmTPC2_CMDQ_CQ_STS0 0xE890E4
+
+#define mmTPC2_CMDQ_CQ_STS1 0xE890E8
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN 0xE890F0
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE890F4
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT 0xE890F8
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE890FC
+
+#define mmTPC2_CMDQ_CQ_IFIFO_CNT 0xE89108
+
+#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE89120
+
+#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE89124
+
+#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE89128
+
+#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE8912C
+
+#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE89130
+
+#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE89134
+
+#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE89138
+
+#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE8913C
+
+#define mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE89140
+
+#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE89144
+
+#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE89148
+
+#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE8914C
+
+#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE89150
+
+#define mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE89154
+
+#define mmTPC2_CMDQ_CP_FENCE0_RDATA 0xE89158
+
+#define mmTPC2_CMDQ_CP_FENCE1_RDATA 0xE8915C
+
+#define mmTPC2_CMDQ_CP_FENCE2_RDATA 0xE89160
+
+#define mmTPC2_CMDQ_CP_FENCE3_RDATA 0xE89164
+
+#define mmTPC2_CMDQ_CP_FENCE0_CNT 0xE89168
+
+#define mmTPC2_CMDQ_CP_FENCE1_CNT 0xE8916C
+
+#define mmTPC2_CMDQ_CP_FENCE2_CNT 0xE89170
+
+#define mmTPC2_CMDQ_CP_FENCE3_CNT 0xE89174
+
+#define mmTPC2_CMDQ_CP_STS 0xE89178
+
+#define mmTPC2_CMDQ_CP_CURRENT_INST_LO 0xE8917C
+
+#define mmTPC2_CMDQ_CP_CURRENT_INST_HI 0xE89180
+
+#define mmTPC2_CMDQ_CP_BARRIER_CFG 0xE89184
+
+#define mmTPC2_CMDQ_CP_DBG_0 0xE89188
+
+#define mmTPC2_CMDQ_CQ_BUF_ADDR 0xE89308
+
+#define mmTPC2_CMDQ_CQ_BUF_RDATA 0xE8930C
+
+#endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */
+
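[Editor's note, not part of the patch] The CMDQ block above exposes a completion-queue interface: CQ_PTR_LO/HI and CQ_TSIZE take the queue's DMA address and size, and the matching *_STS registers are read-back copies. A hedged sketch of programming it is below; goya_writel()/goya_readl() are hypothetical MMIO accessors standing in for whatever register I/O wrappers the driver actually uses, and only the register offsets come from this header.

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>

void goya_writel(uint32_t reg, uint32_t val);	/* assumed MMIO write */
uint32_t goya_readl(uint32_t reg);		/* assumed MMIO read  */

/* Point the TPC2 command queue at a DMA buffer and read back its status copy. */
static void tpc2_cmdq_set_cq(uint64_t dma_addr, uint32_t size)
{
	goya_writel(mmTPC2_CMDQ_CQ_PTR_LO, (uint32_t)dma_addr);
	goya_writel(mmTPC2_CMDQ_CQ_PTR_HI, (uint32_t)(dma_addr >> 32));
	goya_writel(mmTPC2_CMDQ_CQ_TSIZE, size);

	/* The *_STS registers mirror what the hardware latched. */
	(void)goya_readl(mmTPC2_CMDQ_CQ_PTR_LO_STS);
}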
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
new file mode 100644
index 000000000000..f3e32c018064
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_QM_REGS_H_
+#define ASIC_REG_TPC2_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC2_QM_GLBL_CFG0 0xE88000
+
+#define mmTPC2_QM_GLBL_CFG1 0xE88004
+
+#define mmTPC2_QM_GLBL_PROT 0xE88008
+
+#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88010
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88014
+
+#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88018
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS 0xE8801C
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS 0xE88020
+
+#define mmTPC2_QM_GLBL_STS0 0xE88024
+
+#define mmTPC2_QM_GLBL_STS1 0xE88028
+
+#define mmTPC2_QM_PQ_BASE_LO 0xE88060
+
+#define mmTPC2_QM_PQ_BASE_HI 0xE88064
+
+#define mmTPC2_QM_PQ_SIZE 0xE88068
+
+#define mmTPC2_QM_PQ_PI 0xE8806C
+
+#define mmTPC2_QM_PQ_CI 0xE88070
+
+#define mmTPC2_QM_PQ_CFG0 0xE88074
+
+#define mmTPC2_QM_PQ_CFG1 0xE88078
+
+#define mmTPC2_QM_PQ_ARUSER 0xE8807C
+
+#define mmTPC2_QM_PQ_PUSH0 0xE88080
+
+#define mmTPC2_QM_PQ_PUSH1 0xE88084
+
+#define mmTPC2_QM_PQ_PUSH2 0xE88088
+
+#define mmTPC2_QM_PQ_PUSH3 0xE8808C
+
+#define mmTPC2_QM_PQ_STS0 0xE88090
+
+#define mmTPC2_QM_PQ_STS1 0xE88094
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_EN 0xE880A0
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE880A4
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_SAT 0xE880A8
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_TOUT 0xE880AC
+
+#define mmTPC2_QM_CQ_CFG0 0xE880B0
+
+#define mmTPC2_QM_CQ_CFG1 0xE880B4
+
+#define mmTPC2_QM_CQ_ARUSER 0xE880B8
+
+#define mmTPC2_QM_CQ_PTR_LO 0xE880C0
+
+#define mmTPC2_QM_CQ_PTR_HI 0xE880C4
+
+#define mmTPC2_QM_CQ_TSIZE 0xE880C8
+
+#define mmTPC2_QM_CQ_CTL 0xE880CC
+
+#define mmTPC2_QM_CQ_PTR_LO_STS 0xE880D4
+
+#define mmTPC2_QM_CQ_PTR_HI_STS 0xE880D8
+
+#define mmTPC2_QM_CQ_TSIZE_STS 0xE880DC
+
+#define mmTPC2_QM_CQ_CTL_STS 0xE880E0
+
+#define mmTPC2_QM_CQ_STS0 0xE880E4
+
+#define mmTPC2_QM_CQ_STS1 0xE880E8
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_EN 0xE880F0
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE880F4
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_SAT 0xE880F8
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_TOUT 0xE880FC
+
+#define mmTPC2_QM_CQ_IFIFO_CNT 0xE88108
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO 0xE88120
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI 0xE88124
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO 0xE88128
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI 0xE8812C
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO 0xE88130
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI 0xE88134
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO 0xE88138
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI 0xE8813C
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET 0xE88140
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE88144
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE88148
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE8814C
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE88150
+
+#define mmTPC2_QM_CP_LDMA_COMMIT_OFFSET 0xE88154
+
+#define mmTPC2_QM_CP_FENCE0_RDATA 0xE88158
+
+#define mmTPC2_QM_CP_FENCE1_RDATA 0xE8815C
+
+#define mmTPC2_QM_CP_FENCE2_RDATA 0xE88160
+
+#define mmTPC2_QM_CP_FENCE3_RDATA 0xE88164
+
+#define mmTPC2_QM_CP_FENCE0_CNT 0xE88168
+
+#define mmTPC2_QM_CP_FENCE1_CNT 0xE8816C
+
+#define mmTPC2_QM_CP_FENCE2_CNT 0xE88170
+
+#define mmTPC2_QM_CP_FENCE3_CNT 0xE88174
+
+#define mmTPC2_QM_CP_STS 0xE88178
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO 0xE8817C
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI 0xE88180
+
+#define mmTPC2_QM_CP_BARRIER_CFG 0xE88184
+
+#define mmTPC2_QM_CP_DBG_0 0xE88188
+
+#define mmTPC2_QM_PQ_BUF_ADDR 0xE88300
+
+#define mmTPC2_QM_PQ_BUF_RDATA 0xE88304
+
+#define mmTPC2_QM_CQ_BUF_ADDR 0xE88308
+
+#define mmTPC2_QM_CQ_BUF_RDATA 0xE8830C
+
+#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
+
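[Editor's note, not part of the patch] The QM block above describes a physical-queue (PQ) ring: PQ_BASE_LO/HI and PQ_SIZE define the ring, software advances the producer index PQ_PI, and the consumer index PQ_CI tracks what has been fetched. A hedged sketch of a setup/submit path follows; the goya_writel() accessor and the function names are assumptions, and only the PQ register offsets come from this header.

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>

void goya_writel(uint32_t reg, uint32_t val);	/* assumed MMIO write */

/* Set up the TPC2 queue-manager physical queue. */
static void tpc2_qm_init_pq(uint64_t ring_dma, uint32_t n_entries)
{
	goya_writel(mmTPC2_QM_PQ_BASE_LO, (uint32_t)ring_dma);
	goya_writel(mmTPC2_QM_PQ_BASE_HI, (uint32_t)(ring_dma >> 32));
	goya_writel(mmTPC2_QM_PQ_SIZE, n_entries);
	goya_writel(mmTPC2_QM_PQ_PI, 0);
}

static void tpc2_qm_submit(uint32_t new_pi)
{
	/* Software advances PI; hardware advances CI as it consumes entries. */
	goya_writel(mmTPC2_QM_PQ_PI, new_pi);
}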
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
new file mode 100644
index 000000000000..0eb0cd1fbd19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_RTR_REGS_H_
+#define ASIC_REG_TPC2_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC2_RTR_HBW_RD_RQ_E_ARB 0xE80100
+
+#define mmTPC2_RTR_HBW_RD_RQ_W_ARB 0xE80104
+
+#define mmTPC2_RTR_HBW_RD_RQ_N_ARB 0xE80108
+
+#define mmTPC2_RTR_HBW_RD_RQ_S_ARB 0xE8010C
+
+#define mmTPC2_RTR_HBW_RD_RQ_L_ARB 0xE80110
+
+#define mmTPC2_RTR_HBW_E_ARB_MAX 0xE80120
+
+#define mmTPC2_RTR_HBW_W_ARB_MAX 0xE80124
+
+#define mmTPC2_RTR_HBW_N_ARB_MAX 0xE80128
+
+#define mmTPC2_RTR_HBW_S_ARB_MAX 0xE8012C
+
+#define mmTPC2_RTR_HBW_L_ARB_MAX 0xE80130
+
+#define mmTPC2_RTR_HBW_RD_RS_E_ARB 0xE80140
+
+#define mmTPC2_RTR_HBW_RD_RS_W_ARB 0xE80144
+
+#define mmTPC2_RTR_HBW_RD_RS_N_ARB 0xE80148
+
+#define mmTPC2_RTR_HBW_RD_RS_S_ARB 0xE8014C
+
+#define mmTPC2_RTR_HBW_RD_RS_L_ARB 0xE80150
+
+#define mmTPC2_RTR_HBW_WR_RQ_E_ARB 0xE80170
+
+#define mmTPC2_RTR_HBW_WR_RQ_W_ARB 0xE80174
+
+#define mmTPC2_RTR_HBW_WR_RQ_N_ARB 0xE80178
+
+#define mmTPC2_RTR_HBW_WR_RQ_S_ARB 0xE8017C
+
+#define mmTPC2_RTR_HBW_WR_RQ_L_ARB 0xE80180
+
+#define mmTPC2_RTR_HBW_WR_RS_E_ARB 0xE80190
+
+#define mmTPC2_RTR_HBW_WR_RS_W_ARB 0xE80194
+
+#define mmTPC2_RTR_HBW_WR_RS_N_ARB 0xE80198
+
+#define mmTPC2_RTR_HBW_WR_RS_S_ARB 0xE8019C
+
+#define mmTPC2_RTR_HBW_WR_RS_L_ARB 0xE801A0
+
+#define mmTPC2_RTR_LBW_RD_RQ_E_ARB 0xE80200
+
+#define mmTPC2_RTR_LBW_RD_RQ_W_ARB 0xE80204
+
+#define mmTPC2_RTR_LBW_RD_RQ_N_ARB 0xE80208
+
+#define mmTPC2_RTR_LBW_RD_RQ_S_ARB 0xE8020C
+
+#define mmTPC2_RTR_LBW_RD_RQ_L_ARB 0xE80210
+
+#define mmTPC2_RTR_LBW_E_ARB_MAX 0xE80220
+
+#define mmTPC2_RTR_LBW_W_ARB_MAX 0xE80224
+
+#define mmTPC2_RTR_LBW_N_ARB_MAX 0xE80228
+
+#define mmTPC2_RTR_LBW_S_ARB_MAX 0xE8022C
+
+#define mmTPC2_RTR_LBW_L_ARB_MAX 0xE80230
+
+#define mmTPC2_RTR_LBW_RD_RS_E_ARB 0xE80250
+
+#define mmTPC2_RTR_LBW_RD_RS_W_ARB 0xE80254
+
+#define mmTPC2_RTR_LBW_RD_RS_N_ARB 0xE80258
+
+#define mmTPC2_RTR_LBW_RD_RS_S_ARB 0xE8025C
+
+#define mmTPC2_RTR_LBW_RD_RS_L_ARB 0xE80260
+
+#define mmTPC2_RTR_LBW_WR_RQ_E_ARB 0xE80270
+
+#define mmTPC2_RTR_LBW_WR_RQ_W_ARB 0xE80274
+
+#define mmTPC2_RTR_LBW_WR_RQ_N_ARB 0xE80278
+
+#define mmTPC2_RTR_LBW_WR_RQ_S_ARB 0xE8027C
+
+#define mmTPC2_RTR_LBW_WR_RQ_L_ARB 0xE80280
+
+#define mmTPC2_RTR_LBW_WR_RS_E_ARB 0xE80290
+
+#define mmTPC2_RTR_LBW_WR_RS_W_ARB 0xE80294
+
+#define mmTPC2_RTR_LBW_WR_RS_N_ARB 0xE80298
+
+#define mmTPC2_RTR_LBW_WR_RS_S_ARB 0xE8029C
+
+#define mmTPC2_RTR_LBW_WR_RS_L_ARB 0xE802A0
+
+#define mmTPC2_RTR_DBG_E_ARB 0xE80300
+
+#define mmTPC2_RTR_DBG_W_ARB 0xE80304
+
+#define mmTPC2_RTR_DBG_N_ARB 0xE80308
+
+#define mmTPC2_RTR_DBG_S_ARB 0xE8030C
+
+#define mmTPC2_RTR_DBG_L_ARB 0xE80310
+
+#define mmTPC2_RTR_DBG_E_ARB_MAX 0xE80320
+
+#define mmTPC2_RTR_DBG_W_ARB_MAX 0xE80324
+
+#define mmTPC2_RTR_DBG_N_ARB_MAX 0xE80328
+
+#define mmTPC2_RTR_DBG_S_ARB_MAX 0xE8032C
+
+#define mmTPC2_RTR_DBG_L_ARB_MAX 0xE80330
+
+#define mmTPC2_RTR_SPLIT_COEF_0 0xE80400
+
+#define mmTPC2_RTR_SPLIT_COEF_1 0xE80404
+
+#define mmTPC2_RTR_SPLIT_COEF_2 0xE80408
+
+#define mmTPC2_RTR_SPLIT_COEF_3 0xE8040C
+
+#define mmTPC2_RTR_SPLIT_COEF_4 0xE80410
+
+#define mmTPC2_RTR_SPLIT_COEF_5 0xE80414
+
+#define mmTPC2_RTR_SPLIT_COEF_6 0xE80418
+
+#define mmTPC2_RTR_SPLIT_COEF_7 0xE8041C
+
+#define mmTPC2_RTR_SPLIT_COEF_8 0xE80420
+
+#define mmTPC2_RTR_SPLIT_COEF_9 0xE80424
+
+#define mmTPC2_RTR_SPLIT_CFG 0xE80440
+
+#define mmTPC2_RTR_SPLIT_RD_SAT 0xE80444
+
+#define mmTPC2_RTR_SPLIT_RD_RST_TOKEN 0xE80448
+
+#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_0 0xE8044C
+
+#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_1 0xE80450
+
+#define mmTPC2_RTR_SPLIT_WR_SAT 0xE80454
+
+#define mmTPC2_RTR_WPLIT_WR_TST_TOLEN 0xE80458
+
+#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_0 0xE8045C
+
+#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_1 0xE80460
+
+#define mmTPC2_RTR_HBW_RANGE_HIT 0xE80470
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_0 0xE80480
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_1 0xE80484
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_2 0xE80488
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_3 0xE8048C
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_4 0xE80490
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_5 0xE80494
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_6 0xE80498
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_7 0xE8049C
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_0 0xE804A0
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_1 0xE804A4
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_2 0xE804A8
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_3 0xE804AC
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_4 0xE804B0
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_5 0xE804B4
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_6 0xE804B8
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_7 0xE804BC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_0 0xE804C0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_1 0xE804C4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_2 0xE804C8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_3 0xE804CC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_4 0xE804D0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_5 0xE804D4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_6 0xE804D8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_7 0xE804DC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_0 0xE804E0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_1 0xE804E4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_2 0xE804E8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_3 0xE804EC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_4 0xE804F0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_5 0xE804F4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_6 0xE804F8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_7 0xE804FC
+
+#define mmTPC2_RTR_LBW_RANGE_HIT 0xE80500
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_0 0xE80510
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_1 0xE80514
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_2 0xE80518
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_3 0xE8051C
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_4 0xE80520
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_5 0xE80524
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_6 0xE80528
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_7 0xE8052C
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_8 0xE80530
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_9 0xE80534
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_10 0xE80538
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_11 0xE8053C
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_12 0xE80540
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_13 0xE80544
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_14 0xE80548
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_15 0xE8054C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_0 0xE80550
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_1 0xE80554
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_2 0xE80558
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_3 0xE8055C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_4 0xE80560
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_5 0xE80564
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_6 0xE80568
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_7 0xE8056C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_8 0xE80570
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_9 0xE80574
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_10 0xE80578
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_11 0xE8057C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_12 0xE80580
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_13 0xE80584
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_14 0xE80588
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_15 0xE8058C
+
+#define mmTPC2_RTR_RGLTR 0xE80590
+
+#define mmTPC2_RTR_RGLTR_WR_RESULT 0xE80594
+
+#define mmTPC2_RTR_RGLTR_RD_RESULT 0xE80598
+
+#define mmTPC2_RTR_SCRAMB_EN 0xE80600
+
+#define mmTPC2_RTR_NON_LIN_SCRAMB 0xE80604
+
+#endif /* ASIC_REG_TPC2_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
new file mode 100644
index 000000000000..0baf63c69b25
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_CFG_REGS_H_
+#define ASIC_REG_TPC3_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6418
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC641C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC6420
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6424
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6428
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC642C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6430
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6434
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC6438
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xEC643C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6440
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6444
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6448
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC644C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC6450
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6454
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6458
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC645C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC6460
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6464
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6468
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC646C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6470
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6474
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC6478
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xEC647C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6480
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6484
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6488
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC648C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC6490
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6494
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6498
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC649C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC64A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC64A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC64A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC64AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xEC64B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC64B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC64B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xEC64BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC64C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC64C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xEC64C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC64CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC64D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xEC64D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xEC64E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xEC64FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC6500
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC6504
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6508
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC650C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC6510
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6514
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC6518
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC651C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6520
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC6524
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC6528
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xEC652C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC6530
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC6534
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC6538
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC653C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC6540
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC6544
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6548
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC654C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC6550
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6554
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6558
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC655C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6560
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6564
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC6568
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xEC656C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6570
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6574
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6578
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC657C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC6580
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6584
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6588
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC658C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC6590
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6594
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6598
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC659C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xEC65A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC65A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC65A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xEC65AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC65B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC65B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xEC65B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC65BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC65C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xEC65C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC65C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC65CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC65D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC65D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC65D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC65DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xEC65E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC65E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC65E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xEC65EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC65F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC65F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xEC65F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC65FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC6600
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6604
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6608
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC660C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6610
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6614
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC6618
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC661C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6620
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6624
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC6628
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xEC662C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC6630
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC6634
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6638
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC663C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC6640
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6644
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC6648
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC664C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6650
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC6654
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC6658
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xEC665C
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6660
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC6664
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6668
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC666C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6670
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC6674
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC6678
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC667C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC6680
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC6684
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC6688
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC668C
+
+#define mmTPC3_CFG_KERNEL_SRF_0 0xEC6690
+
+#define mmTPC3_CFG_KERNEL_SRF_1 0xEC6694
+
+#define mmTPC3_CFG_KERNEL_SRF_2 0xEC6698
+
+#define mmTPC3_CFG_KERNEL_SRF_3 0xEC669C
+
+#define mmTPC3_CFG_KERNEL_SRF_4 0xEC66A0
+
+#define mmTPC3_CFG_KERNEL_SRF_5 0xEC66A4
+
+#define mmTPC3_CFG_KERNEL_SRF_6 0xEC66A8
+
+#define mmTPC3_CFG_KERNEL_SRF_7 0xEC66AC
+
+#define mmTPC3_CFG_KERNEL_SRF_8 0xEC66B0
+
+#define mmTPC3_CFG_KERNEL_SRF_9 0xEC66B4
+
+#define mmTPC3_CFG_KERNEL_SRF_10 0xEC66B8
+
+#define mmTPC3_CFG_KERNEL_SRF_11 0xEC66BC
+
+#define mmTPC3_CFG_KERNEL_SRF_12 0xEC66C0
+
+#define mmTPC3_CFG_KERNEL_SRF_13 0xEC66C4
+
+#define mmTPC3_CFG_KERNEL_SRF_14 0xEC66C8
+
+#define mmTPC3_CFG_KERNEL_SRF_15 0xEC66CC
+
+#define mmTPC3_CFG_KERNEL_SRF_16 0xEC66D0
+
+#define mmTPC3_CFG_KERNEL_SRF_17 0xEC66D4
+
+#define mmTPC3_CFG_KERNEL_SRF_18 0xEC66D8
+
+#define mmTPC3_CFG_KERNEL_SRF_19 0xEC66DC
+
+#define mmTPC3_CFG_KERNEL_SRF_20 0xEC66E0
+
+#define mmTPC3_CFG_KERNEL_SRF_21 0xEC66E4
+
+#define mmTPC3_CFG_KERNEL_SRF_22 0xEC66E8
+
+#define mmTPC3_CFG_KERNEL_SRF_23 0xEC66EC
+
+#define mmTPC3_CFG_KERNEL_SRF_24 0xEC66F0
+
+#define mmTPC3_CFG_KERNEL_SRF_25 0xEC66F4
+
+#define mmTPC3_CFG_KERNEL_SRF_26 0xEC66F8
+
+#define mmTPC3_CFG_KERNEL_SRF_27 0xEC66FC
+
+#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6700
+
+#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6704
+
+#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6708
+
+#define mmTPC3_CFG_KERNEL_SRF_31 0xEC670C
+
+#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC6710
+
+#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6714
+
+#define mmTPC3_CFG_RESERVED_DESC_END 0xEC6738
+
+#define mmTPC3_CFG_ROUND_CSR 0xEC67FC
+
+#define mmTPC3_CFG_TBUF_BASE_ADDR_LOW 0xEC6800
+
+#define mmTPC3_CFG_TBUF_BASE_ADDR_HIGH 0xEC6804
+
+#define mmTPC3_CFG_SEMAPHORE 0xEC6808
+
+#define mmTPC3_CFG_VFLAGS 0xEC680C
+
+#define mmTPC3_CFG_SFLAGS 0xEC6810
+
+#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6818
+
+#define mmTPC3_CFG_STATUS 0xEC681C
+
+#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6820
+
+#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6824
+
+#define mmTPC3_CFG_SM_BASE_ADDRESS_LOW 0xEC6828
+
+#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC682C
+
+#define mmTPC3_CFG_TPC_CMD 0xEC6830
+
+#define mmTPC3_CFG_TPC_EXECUTE 0xEC6838
+
+#define mmTPC3_CFG_TPC_STALL 0xEC683C
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6840
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6844
+
+#define mmTPC3_CFG_MSS_CONFIG 0xEC6854
+
+#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6858
+
+#define mmTPC3_CFG_TPC_INTR_MASK 0xEC685C
+
+#define mmTPC3_CFG_TSB_CONFIG 0xEC6860
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04
+
+#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08
+
+#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6A18
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A1C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A20
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6A24
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A28
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A2C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6A30
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A34
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A38
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xEC6A3C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A40
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A44
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6A48
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A4C
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A50
+
+#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A54
+
+#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A58
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A5C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A60
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6A64
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A68
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A6C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6A70
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A74
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A78
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xEC6A7C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A80
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A84
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6A88
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A8C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A90
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6A94
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A98
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A9C
+
+#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6AA0
+
+#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6AA4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6AA8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6AAC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xEC6AB0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6AB4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6AB8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xEC6ABC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6AC0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6AC4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xEC6AC8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6ACC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6AD0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xEC6AD4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AD8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6ADC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xEC6AE0
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AE4
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xEC6AE8
+
+#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AEC
+
+#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AF0
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AF4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6AF8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xEC6AFC
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6B00
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6B04
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6B08
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6B0C
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6B10
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6B14
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6B18
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6B1C
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6B20
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6B24
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6B28
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xEC6B2C
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6B30
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6B34
+
+#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6B38
+
+#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6B3C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6B40
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6B44
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6B48
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6B4C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6B50
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6B54
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B58
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B5C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6B60
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B64
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B68
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xEC6B6C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B70
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B74
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6B78
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B7C
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B80
+
+#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B84
+
+#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B88
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B8C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B90
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6B94
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B98
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B9C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xEC6BA0
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6BA4
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6BA8
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xEC6BAC
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6BB0
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6BB4
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xEC6BB8
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6BBC
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6BC0
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xEC6BC4
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6BC8
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6BCC
+
+#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6BD0
+
+#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6BD4
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6BD8
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6BDC
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xEC6BE0
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6BE4
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6BE8
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xEC6BEC
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6BF0
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6BF4
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xEC6BF8
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6BFC
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6C00
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6C04
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6C08
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6C0C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6C10
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6C14
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6C18
+
+#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6C1C
+
+#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6C20
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6C24
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6C28
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xEC6C2C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6C30
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6C34
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6C38
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6C3C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6C40
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6C44
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6C48
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6C4C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6C50
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6C54
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6C58
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xEC6C5C
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6C60
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6C64
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6C68
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6C6C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6C70
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6C74
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6C78
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 0xEC6C7C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6C80
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6C84
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6C88
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6C8C
+
+#define mmTPC3_CFG_QM_SRF_0 0xEC6C90
+
+#define mmTPC3_CFG_QM_SRF_1 0xEC6C94
+
+#define mmTPC3_CFG_QM_SRF_2 0xEC6C98
+
+#define mmTPC3_CFG_QM_SRF_3 0xEC6C9C
+
+#define mmTPC3_CFG_QM_SRF_4 0xEC6CA0
+
+#define mmTPC3_CFG_QM_SRF_5 0xEC6CA4
+
+#define mmTPC3_CFG_QM_SRF_6 0xEC6CA8
+
+#define mmTPC3_CFG_QM_SRF_7 0xEC6CAC
+
+#define mmTPC3_CFG_QM_SRF_8 0xEC6CB0
+
+#define mmTPC3_CFG_QM_SRF_9 0xEC6CB4
+
+#define mmTPC3_CFG_QM_SRF_10 0xEC6CB8
+
+#define mmTPC3_CFG_QM_SRF_11 0xEC6CBC
+
+#define mmTPC3_CFG_QM_SRF_12 0xEC6CC0
+
+#define mmTPC3_CFG_QM_SRF_13 0xEC6CC4
+
+#define mmTPC3_CFG_QM_SRF_14 0xEC6CC8
+
+#define mmTPC3_CFG_QM_SRF_15 0xEC6CCC
+
+#define mmTPC3_CFG_QM_SRF_16 0xEC6CD0
+
+#define mmTPC3_CFG_QM_SRF_17 0xEC6CD4
+
+#define mmTPC3_CFG_QM_SRF_18 0xEC6CD8
+
+#define mmTPC3_CFG_QM_SRF_19 0xEC6CDC
+
+#define mmTPC3_CFG_QM_SRF_20 0xEC6CE0
+
+#define mmTPC3_CFG_QM_SRF_21 0xEC6CE4
+
+#define mmTPC3_CFG_QM_SRF_22 0xEC6CE8
+
+#define mmTPC3_CFG_QM_SRF_23 0xEC6CEC
+
+#define mmTPC3_CFG_QM_SRF_24 0xEC6CF0
+
+#define mmTPC3_CFG_QM_SRF_25 0xEC6CF4
+
+#define mmTPC3_CFG_QM_SRF_26 0xEC6CF8
+
+#define mmTPC3_CFG_QM_SRF_27 0xEC6CFC
+
+#define mmTPC3_CFG_QM_SRF_28 0xEC6D00
+
+#define mmTPC3_CFG_QM_SRF_29 0xEC6D04
+
+#define mmTPC3_CFG_QM_SRF_30 0xEC6D08
+
+#define mmTPC3_CFG_QM_SRF_31 0xEC6D0C
+
+#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6D10
+
+#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D14
+
+#define mmTPC3_CFG_ARUSER 0xEC6D18
+
+#define mmTPC3_CFG_AWUSER 0xEC6D1C
+
+#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC6E00
+
+#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC6E04
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC6E08
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC6E0C
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC6E10
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC6E14
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC6E18
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC6E1C
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC6E20
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC6E24
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC6E28
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC6E2C
+
+#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
new file mode 100644
index 000000000000..82a5261e852f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_CMDQ_REGS_H_
+#define ASIC_REG_TPC3_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC3_CMDQ_GLBL_CFG0 0xEC9000
+
+#define mmTPC3_CMDQ_GLBL_CFG1 0xEC9004
+
+#define mmTPC3_CMDQ_GLBL_PROT 0xEC9008
+
+#define mmTPC3_CMDQ_GLBL_ERR_CFG 0xEC900C
+
+#define mmTPC3_CMDQ_GLBL_ERR_ADDR_LO 0xEC9010
+
+#define mmTPC3_CMDQ_GLBL_ERR_ADDR_HI 0xEC9014
+
+#define mmTPC3_CMDQ_GLBL_ERR_WDATA 0xEC9018
+
+#define mmTPC3_CMDQ_GLBL_SECURE_PROPS 0xEC901C
+
+#define mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS 0xEC9020
+
+#define mmTPC3_CMDQ_GLBL_STS0 0xEC9024
+
+#define mmTPC3_CMDQ_GLBL_STS1 0xEC9028
+
+#define mmTPC3_CMDQ_CQ_CFG0 0xEC90B0
+
+#define mmTPC3_CMDQ_CQ_CFG1 0xEC90B4
+
+#define mmTPC3_CMDQ_CQ_ARUSER 0xEC90B8
+
+#define mmTPC3_CMDQ_CQ_PTR_LO 0xEC90C0
+
+#define mmTPC3_CMDQ_CQ_PTR_HI 0xEC90C4
+
+#define mmTPC3_CMDQ_CQ_TSIZE 0xEC90C8
+
+#define mmTPC3_CMDQ_CQ_CTL 0xEC90CC
+
+#define mmTPC3_CMDQ_CQ_PTR_LO_STS 0xEC90D4
+
+#define mmTPC3_CMDQ_CQ_PTR_HI_STS 0xEC90D8
+
+#define mmTPC3_CMDQ_CQ_TSIZE_STS 0xEC90DC
+
+#define mmTPC3_CMDQ_CQ_CTL_STS 0xEC90E0
+
+#define mmTPC3_CMDQ_CQ_STS0 0xEC90E4
+
+#define mmTPC3_CMDQ_CQ_STS1 0xEC90E8
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN 0xEC90F0
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xEC90F4
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT 0xEC90F8
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT 0xEC90FC
+
+#define mmTPC3_CMDQ_CQ_IFIFO_CNT 0xEC9108
+
+#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO 0xEC9120
+
+#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI 0xEC9124
+
+#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO 0xEC9128
+
+#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI 0xEC912C
+
+#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO 0xEC9130
+
+#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI 0xEC9134
+
+#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO 0xEC9138
+
+#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI 0xEC913C
+
+#define mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET 0xEC9140
+
+#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC9144
+
+#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC9148
+
+#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xEC914C
+
+#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xEC9150
+
+#define mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET 0xEC9154
+
+#define mmTPC3_CMDQ_CP_FENCE0_RDATA 0xEC9158
+
+#define mmTPC3_CMDQ_CP_FENCE1_RDATA 0xEC915C
+
+#define mmTPC3_CMDQ_CP_FENCE2_RDATA 0xEC9160
+
+#define mmTPC3_CMDQ_CP_FENCE3_RDATA 0xEC9164
+
+#define mmTPC3_CMDQ_CP_FENCE0_CNT 0xEC9168
+
+#define mmTPC3_CMDQ_CP_FENCE1_CNT 0xEC916C
+
+#define mmTPC3_CMDQ_CP_FENCE2_CNT 0xEC9170
+
+#define mmTPC3_CMDQ_CP_FENCE3_CNT 0xEC9174
+
+#define mmTPC3_CMDQ_CP_STS 0xEC9178
+
+#define mmTPC3_CMDQ_CP_CURRENT_INST_LO 0xEC917C
+
+#define mmTPC3_CMDQ_CP_CURRENT_INST_HI 0xEC9180
+
+#define mmTPC3_CMDQ_CP_BARRIER_CFG 0xEC9184
+
+#define mmTPC3_CMDQ_CP_DBG_0 0xEC9188
+
+#define mmTPC3_CMDQ_CQ_BUF_ADDR 0xEC9308
+
+#define mmTPC3_CMDQ_CQ_BUF_RDATA 0xEC930C
+
+#endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
new file mode 100644
index 000000000000..b05b1e18e664
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_QM_REGS_H_
+#define ASIC_REG_TPC3_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC3_QM_GLBL_CFG0 0xEC8000
+
+#define mmTPC3_QM_GLBL_CFG1 0xEC8004
+
+#define mmTPC3_QM_GLBL_PROT 0xEC8008
+
+#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8010
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8014
+
+#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8018
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS 0xEC801C
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS 0xEC8020
+
+#define mmTPC3_QM_GLBL_STS0 0xEC8024
+
+#define mmTPC3_QM_GLBL_STS1 0xEC8028
+
+#define mmTPC3_QM_PQ_BASE_LO 0xEC8060
+
+#define mmTPC3_QM_PQ_BASE_HI 0xEC8064
+
+#define mmTPC3_QM_PQ_SIZE 0xEC8068
+
+#define mmTPC3_QM_PQ_PI 0xEC806C
+
+#define mmTPC3_QM_PQ_CI 0xEC8070
+
+#define mmTPC3_QM_PQ_CFG0 0xEC8074
+
+#define mmTPC3_QM_PQ_CFG1 0xEC8078
+
+#define mmTPC3_QM_PQ_ARUSER 0xEC807C
+
+#define mmTPC3_QM_PQ_PUSH0 0xEC8080
+
+#define mmTPC3_QM_PQ_PUSH1 0xEC8084
+
+#define mmTPC3_QM_PQ_PUSH2 0xEC8088
+
+#define mmTPC3_QM_PQ_PUSH3 0xEC808C
+
+#define mmTPC3_QM_PQ_STS0 0xEC8090
+
+#define mmTPC3_QM_PQ_STS1 0xEC8094
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_EN 0xEC80A0
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xEC80A4
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_SAT 0xEC80A8
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_TOUT 0xEC80AC
+
+#define mmTPC3_QM_CQ_CFG0 0xEC80B0
+
+#define mmTPC3_QM_CQ_CFG1 0xEC80B4
+
+#define mmTPC3_QM_CQ_ARUSER 0xEC80B8
+
+#define mmTPC3_QM_CQ_PTR_LO 0xEC80C0
+
+#define mmTPC3_QM_CQ_PTR_HI 0xEC80C4
+
+#define mmTPC3_QM_CQ_TSIZE 0xEC80C8
+
+#define mmTPC3_QM_CQ_CTL 0xEC80CC
+
+#define mmTPC3_QM_CQ_PTR_LO_STS 0xEC80D4
+
+#define mmTPC3_QM_CQ_PTR_HI_STS 0xEC80D8
+
+#define mmTPC3_QM_CQ_TSIZE_STS 0xEC80DC
+
+#define mmTPC3_QM_CQ_CTL_STS 0xEC80E0
+
+#define mmTPC3_QM_CQ_STS0 0xEC80E4
+
+#define mmTPC3_QM_CQ_STS1 0xEC80E8
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_EN 0xEC80F0
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xEC80F4
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_SAT 0xEC80F8
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_TOUT 0xEC80FC
+
+#define mmTPC3_QM_CQ_IFIFO_CNT 0xEC8108
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO 0xEC8120
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI 0xEC8124
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO 0xEC8128
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI 0xEC812C
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO 0xEC8130
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI 0xEC8134
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO 0xEC8138
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI 0xEC813C
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET 0xEC8140
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC8144
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC8148
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xEC814C
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xEC8150
+
+#define mmTPC3_QM_CP_LDMA_COMMIT_OFFSET 0xEC8154
+
+#define mmTPC3_QM_CP_FENCE0_RDATA 0xEC8158
+
+#define mmTPC3_QM_CP_FENCE1_RDATA 0xEC815C
+
+#define mmTPC3_QM_CP_FENCE2_RDATA 0xEC8160
+
+#define mmTPC3_QM_CP_FENCE3_RDATA 0xEC8164
+
+#define mmTPC3_QM_CP_FENCE0_CNT 0xEC8168
+
+#define mmTPC3_QM_CP_FENCE1_CNT 0xEC816C
+
+#define mmTPC3_QM_CP_FENCE2_CNT 0xEC8170
+
+#define mmTPC3_QM_CP_FENCE3_CNT 0xEC8174
+
+#define mmTPC3_QM_CP_STS 0xEC8178
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO 0xEC817C
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI 0xEC8180
+
+#define mmTPC3_QM_CP_BARRIER_CFG 0xEC8184
+
+#define mmTPC3_QM_CP_DBG_0 0xEC8188
+
+#define mmTPC3_QM_PQ_BUF_ADDR 0xEC8300
+
+#define mmTPC3_QM_PQ_BUF_RDATA 0xEC8304
+
+#define mmTPC3_QM_CQ_BUF_ADDR 0xEC8308
+
+#define mmTPC3_QM_CQ_BUF_RDATA 0xEC830C
+
+#endif /* ASIC_REG_TPC3_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
new file mode 100644
index 000000000000..5a2fd7652650
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_RTR_REGS_H_
+#define ASIC_REG_TPC3_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC3_RTR_HBW_RD_RQ_E_ARB 0xEC0100
+
+#define mmTPC3_RTR_HBW_RD_RQ_W_ARB 0xEC0104
+
+#define mmTPC3_RTR_HBW_RD_RQ_N_ARB 0xEC0108
+
+#define mmTPC3_RTR_HBW_RD_RQ_S_ARB 0xEC010C
+
+#define mmTPC3_RTR_HBW_RD_RQ_L_ARB 0xEC0110
+
+#define mmTPC3_RTR_HBW_E_ARB_MAX 0xEC0120
+
+#define mmTPC3_RTR_HBW_W_ARB_MAX 0xEC0124
+
+#define mmTPC3_RTR_HBW_N_ARB_MAX 0xEC0128
+
+#define mmTPC3_RTR_HBW_S_ARB_MAX 0xEC012C
+
+#define mmTPC3_RTR_HBW_L_ARB_MAX 0xEC0130
+
+#define mmTPC3_RTR_HBW_RD_RS_E_ARB 0xEC0140
+
+#define mmTPC3_RTR_HBW_RD_RS_W_ARB 0xEC0144
+
+#define mmTPC3_RTR_HBW_RD_RS_N_ARB 0xEC0148
+
+#define mmTPC3_RTR_HBW_RD_RS_S_ARB 0xEC014C
+
+#define mmTPC3_RTR_HBW_RD_RS_L_ARB 0xEC0150
+
+#define mmTPC3_RTR_HBW_WR_RQ_E_ARB 0xEC0170
+
+#define mmTPC3_RTR_HBW_WR_RQ_W_ARB 0xEC0174
+
+#define mmTPC3_RTR_HBW_WR_RQ_N_ARB 0xEC0178
+
+#define mmTPC3_RTR_HBW_WR_RQ_S_ARB 0xEC017C
+
+#define mmTPC3_RTR_HBW_WR_RQ_L_ARB 0xEC0180
+
+#define mmTPC3_RTR_HBW_WR_RS_E_ARB 0xEC0190
+
+#define mmTPC3_RTR_HBW_WR_RS_W_ARB 0xEC0194
+
+#define mmTPC3_RTR_HBW_WR_RS_N_ARB 0xEC0198
+
+#define mmTPC3_RTR_HBW_WR_RS_S_ARB 0xEC019C
+
+#define mmTPC3_RTR_HBW_WR_RS_L_ARB 0xEC01A0
+
+#define mmTPC3_RTR_LBW_RD_RQ_E_ARB 0xEC0200
+
+#define mmTPC3_RTR_LBW_RD_RQ_W_ARB 0xEC0204
+
+#define mmTPC3_RTR_LBW_RD_RQ_N_ARB 0xEC0208
+
+#define mmTPC3_RTR_LBW_RD_RQ_S_ARB 0xEC020C
+
+#define mmTPC3_RTR_LBW_RD_RQ_L_ARB 0xEC0210
+
+#define mmTPC3_RTR_LBW_E_ARB_MAX 0xEC0220
+
+#define mmTPC3_RTR_LBW_W_ARB_MAX 0xEC0224
+
+#define mmTPC3_RTR_LBW_N_ARB_MAX 0xEC0228
+
+#define mmTPC3_RTR_LBW_S_ARB_MAX 0xEC022C
+
+#define mmTPC3_RTR_LBW_L_ARB_MAX 0xEC0230
+
+#define mmTPC3_RTR_LBW_RD_RS_E_ARB 0xEC0250
+
+#define mmTPC3_RTR_LBW_RD_RS_W_ARB 0xEC0254
+
+#define mmTPC3_RTR_LBW_RD_RS_N_ARB 0xEC0258
+
+#define mmTPC3_RTR_LBW_RD_RS_S_ARB 0xEC025C
+
+#define mmTPC3_RTR_LBW_RD_RS_L_ARB 0xEC0260
+
+#define mmTPC3_RTR_LBW_WR_RQ_E_ARB 0xEC0270
+
+#define mmTPC3_RTR_LBW_WR_RQ_W_ARB 0xEC0274
+
+#define mmTPC3_RTR_LBW_WR_RQ_N_ARB 0xEC0278
+
+#define mmTPC3_RTR_LBW_WR_RQ_S_ARB 0xEC027C
+
+#define mmTPC3_RTR_LBW_WR_RQ_L_ARB 0xEC0280
+
+#define mmTPC3_RTR_LBW_WR_RS_E_ARB 0xEC0290
+
+#define mmTPC3_RTR_LBW_WR_RS_W_ARB 0xEC0294
+
+#define mmTPC3_RTR_LBW_WR_RS_N_ARB 0xEC0298
+
+#define mmTPC3_RTR_LBW_WR_RS_S_ARB 0xEC029C
+
+#define mmTPC3_RTR_LBW_WR_RS_L_ARB 0xEC02A0
+
+#define mmTPC3_RTR_DBG_E_ARB 0xEC0300
+
+#define mmTPC3_RTR_DBG_W_ARB 0xEC0304
+
+#define mmTPC3_RTR_DBG_N_ARB 0xEC0308
+
+#define mmTPC3_RTR_DBG_S_ARB 0xEC030C
+
+#define mmTPC3_RTR_DBG_L_ARB 0xEC0310
+
+#define mmTPC3_RTR_DBG_E_ARB_MAX 0xEC0320
+
+#define mmTPC3_RTR_DBG_W_ARB_MAX 0xEC0324
+
+#define mmTPC3_RTR_DBG_N_ARB_MAX 0xEC0328
+
+#define mmTPC3_RTR_DBG_S_ARB_MAX 0xEC032C
+
+#define mmTPC3_RTR_DBG_L_ARB_MAX 0xEC0330
+
+#define mmTPC3_RTR_SPLIT_COEF_0 0xEC0400
+
+#define mmTPC3_RTR_SPLIT_COEF_1 0xEC0404
+
+#define mmTPC3_RTR_SPLIT_COEF_2 0xEC0408
+
+#define mmTPC3_RTR_SPLIT_COEF_3 0xEC040C
+
+#define mmTPC3_RTR_SPLIT_COEF_4 0xEC0410
+
+#define mmTPC3_RTR_SPLIT_COEF_5 0xEC0414
+
+#define mmTPC3_RTR_SPLIT_COEF_6 0xEC0418
+
+#define mmTPC3_RTR_SPLIT_COEF_7 0xEC041C
+
+#define mmTPC3_RTR_SPLIT_COEF_8 0xEC0420
+
+#define mmTPC3_RTR_SPLIT_COEF_9 0xEC0424
+
+#define mmTPC3_RTR_SPLIT_CFG 0xEC0440
+
+#define mmTPC3_RTR_SPLIT_RD_SAT 0xEC0444
+
+#define mmTPC3_RTR_SPLIT_RD_RST_TOKEN 0xEC0448
+
+#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_0 0xEC044C
+
+#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_1 0xEC0450
+
+#define mmTPC3_RTR_SPLIT_WR_SAT 0xEC0454
+
+#define mmTPC3_RTR_WPLIT_WR_TST_TOLEN 0xEC0458
+
+#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_0 0xEC045C
+
+#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_1 0xEC0460
+
+#define mmTPC3_RTR_HBW_RANGE_HIT 0xEC0470
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_0 0xEC0480
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_1 0xEC0484
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_2 0xEC0488
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_3 0xEC048C
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_4 0xEC0490
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_5 0xEC0494
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_6 0xEC0498
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_7 0xEC049C
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_0 0xEC04A0
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_1 0xEC04A4
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_2 0xEC04A8
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_3 0xEC04AC
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_4 0xEC04B0
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_5 0xEC04B4
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_6 0xEC04B8
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_7 0xEC04BC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_0 0xEC04C0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_1 0xEC04C4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_2 0xEC04C8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_3 0xEC04CC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_4 0xEC04D0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_5 0xEC04D4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_6 0xEC04D8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_7 0xEC04DC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_0 0xEC04E0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_1 0xEC04E4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_2 0xEC04E8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_3 0xEC04EC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_4 0xEC04F0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_5 0xEC04F4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_6 0xEC04F8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_7 0xEC04FC
+
+#define mmTPC3_RTR_LBW_RANGE_HIT 0xEC0500
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_0 0xEC0510
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_1 0xEC0514
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_2 0xEC0518
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_3 0xEC051C
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_4 0xEC0520
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_5 0xEC0524
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_6 0xEC0528
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_7 0xEC052C
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_8 0xEC0530
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_9 0xEC0534
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_10 0xEC0538
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_11 0xEC053C
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_12 0xEC0540
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_13 0xEC0544
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_14 0xEC0548
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_15 0xEC054C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_0 0xEC0550
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_1 0xEC0554
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_2 0xEC0558
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_3 0xEC055C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_4 0xEC0560
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_5 0xEC0564
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_6 0xEC0568
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_7 0xEC056C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_8 0xEC0570
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_9 0xEC0574
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_10 0xEC0578
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_11 0xEC057C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_12 0xEC0580
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_13 0xEC0584
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_14 0xEC0588
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_15 0xEC058C
+
+#define mmTPC3_RTR_RGLTR 0xEC0590
+
+#define mmTPC3_RTR_RGLTR_WR_RESULT 0xEC0594
+
+#define mmTPC3_RTR_RGLTR_RD_RESULT 0xEC0598
+
+#define mmTPC3_RTR_SCRAMB_EN 0xEC0600
+
+#define mmTPC3_RTR_NON_LIN_SCRAMB 0xEC0604
+
+#endif /* ASIC_REG_TPC3_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
new file mode 100644
index 000000000000..d64a100075f2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_CFG_REGS_H_
+#define ASIC_REG_TPC4_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF06418
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF0641C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF06420
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF06424
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06428
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF0642C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF06430
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06434
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF06438
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF0643C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06440
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06444
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF06448
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF0644C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF06450
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06454
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06458
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF0645C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF06460
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF06464
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06468
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF0646C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF06470
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06474
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF06478
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF0647C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06480
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06484
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF06488
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF0648C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF06490
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF06494
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06498
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF0649C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF064A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF064A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF064A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF064AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF064B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF064B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF064B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF064BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF064C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF064C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF064C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF064CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF064D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF064D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF064E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF064FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF06500
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF06504
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF06508
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF0650C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF06510
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF06514
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF06518
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF0651C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF06520
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF06524
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF06528
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF0652C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF06530
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF06534
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF06538
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF0653C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF06540
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF06544
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF06548
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF0654C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF06550
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF06554
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06558
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF0655C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF06560
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06564
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF06568
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF0656C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06570
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06574
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF06578
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF0657C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF06580
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06584
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06588
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF0658C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF06590
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF06594
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06598
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF0659C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF065A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF065A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF065A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF065AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF065B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF065B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF065B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF065BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF065C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF065C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF065C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF065CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF065D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF065D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF065D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF065DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF065E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF065E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF065E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF065EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF065F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF065F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF065F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF065FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF06600
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF06604
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06608
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF0660C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF06610
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06614
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF06618
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF0661C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06620
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06624
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF06628
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF0662C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF06630
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF06634
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF06638
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF0663C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF06640
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF06644
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF06648
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF0664C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF06650
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF06654
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF06658
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF0665C
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06660
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF06664
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06668
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF0666C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06670
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF06674
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF06678
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF0667C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF06680
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF06684
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF06688
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF0668C
+
+#define mmTPC4_CFG_KERNEL_SRF_0 0xF06690
+
+#define mmTPC4_CFG_KERNEL_SRF_1 0xF06694
+
+#define mmTPC4_CFG_KERNEL_SRF_2 0xF06698
+
+#define mmTPC4_CFG_KERNEL_SRF_3 0xF0669C
+
+#define mmTPC4_CFG_KERNEL_SRF_4 0xF066A0
+
+#define mmTPC4_CFG_KERNEL_SRF_5 0xF066A4
+
+#define mmTPC4_CFG_KERNEL_SRF_6 0xF066A8
+
+#define mmTPC4_CFG_KERNEL_SRF_7 0xF066AC
+
+#define mmTPC4_CFG_KERNEL_SRF_8 0xF066B0
+
+#define mmTPC4_CFG_KERNEL_SRF_9 0xF066B4
+
+#define mmTPC4_CFG_KERNEL_SRF_10 0xF066B8
+
+#define mmTPC4_CFG_KERNEL_SRF_11 0xF066BC
+
+#define mmTPC4_CFG_KERNEL_SRF_12 0xF066C0
+
+#define mmTPC4_CFG_KERNEL_SRF_13 0xF066C4
+
+#define mmTPC4_CFG_KERNEL_SRF_14 0xF066C8
+
+#define mmTPC4_CFG_KERNEL_SRF_15 0xF066CC
+
+#define mmTPC4_CFG_KERNEL_SRF_16 0xF066D0
+
+#define mmTPC4_CFG_KERNEL_SRF_17 0xF066D4
+
+#define mmTPC4_CFG_KERNEL_SRF_18 0xF066D8
+
+#define mmTPC4_CFG_KERNEL_SRF_19 0xF066DC
+
+#define mmTPC4_CFG_KERNEL_SRF_20 0xF066E0
+
+#define mmTPC4_CFG_KERNEL_SRF_21 0xF066E4
+
+#define mmTPC4_CFG_KERNEL_SRF_22 0xF066E8
+
+#define mmTPC4_CFG_KERNEL_SRF_23 0xF066EC
+
+#define mmTPC4_CFG_KERNEL_SRF_24 0xF066F0
+
+#define mmTPC4_CFG_KERNEL_SRF_25 0xF066F4
+
+#define mmTPC4_CFG_KERNEL_SRF_26 0xF066F8
+
+#define mmTPC4_CFG_KERNEL_SRF_27 0xF066FC
+
+#define mmTPC4_CFG_KERNEL_SRF_28 0xF06700
+
+#define mmTPC4_CFG_KERNEL_SRF_29 0xF06704
+
+#define mmTPC4_CFG_KERNEL_SRF_30 0xF06708
+
+#define mmTPC4_CFG_KERNEL_SRF_31 0xF0670C
+
+#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF06710
+
+#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06714
+
+#define mmTPC4_CFG_RESERVED_DESC_END 0xF06738
+
+#define mmTPC4_CFG_ROUND_CSR 0xF067FC
+
+#define mmTPC4_CFG_TBUF_BASE_ADDR_LOW 0xF06800
+
+#define mmTPC4_CFG_TBUF_BASE_ADDR_HIGH 0xF06804
+
+#define mmTPC4_CFG_SEMAPHORE 0xF06808
+
+#define mmTPC4_CFG_VFLAGS 0xF0680C
+
+#define mmTPC4_CFG_SFLAGS 0xF06810
+
+#define mmTPC4_CFG_LFSR_POLYNOM 0xF06818
+
+#define mmTPC4_CFG_STATUS 0xF0681C
+
+#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06820
+
+#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06824
+
+#define mmTPC4_CFG_SM_BASE_ADDRESS_LOW 0xF06828
+
+#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0682C
+
+#define mmTPC4_CFG_TPC_CMD 0xF06830
+
+#define mmTPC4_CFG_TPC_EXECUTE 0xF06838
+
+#define mmTPC4_CFG_TPC_STALL 0xF0683C
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06840
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06844
+
+#define mmTPC4_CFG_MSS_CONFIG 0xF06854
+
+#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06858
+
+#define mmTPC4_CFG_TPC_INTR_MASK 0xF0685C
+
+#define mmTPC4_CFG_TSB_CONFIG 0xF06860
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04
+
+#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08
+
+#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF06A18
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A1C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A20
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF06A24
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A28
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A2C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF06A30
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A34
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A38
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF06A3C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A40
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A44
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF06A48
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A4C
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A50
+
+#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A54
+
+#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A58
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A5C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A60
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF06A64
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A68
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A6C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF06A70
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A74
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A78
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF06A7C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A80
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A84
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF06A88
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A8C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A90
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF06A94
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A98
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A9C
+
+#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06AA0
+
+#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06AA4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06AA8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06AAC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF06AB0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06AB4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06AB8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF06ABC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06AC0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06AC4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF06AC8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06ACC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06AD0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF06AD4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AD8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06ADC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF06AE0
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AE4
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF06AE8
+
+#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AEC
+
+#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AF0
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AF4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06AF8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF06AFC
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06B00
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06B04
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF06B08
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06B0C
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06B10
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF06B14
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06B18
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06B1C
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF06B20
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06B24
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06B28
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF06B2C
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06B30
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06B34
+
+#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06B38
+
+#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06B3C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06B40
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06B44
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF06B48
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06B4C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06B50
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF06B54
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B58
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B5C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF06B60
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B64
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B68
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF06B6C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B70
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B74
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF06B78
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B7C
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B80
+
+#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B84
+
+#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B88
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B8C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B90
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF06B94
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B98
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B9C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF06BA0
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06BA4
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06BA8
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF06BAC
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06BB0
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06BB4
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF06BB8
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06BBC
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06BC0
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF06BC4
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06BC8
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06BCC
+
+#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06BD0
+
+#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06BD4
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06BD8
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06BDC
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF06BE0
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06BE4
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06BE8
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF06BEC
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06BF0
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06BF4
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF06BF8
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06BFC
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06C00
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF06C04
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06C08
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06C0C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF06C10
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06C14
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06C18
+
+#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06C1C
+
+#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06C20
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06C24
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06C28
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF06C2C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06C30
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06C34
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF06C38
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06C3C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06C40
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF06C44
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06C48
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06C4C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF06C50
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06C54
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06C58
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF06C5C
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06C60
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06C64
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06C68
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06C6C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06C70
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06C74
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06C78
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 0xF06C7C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06C80
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06C84
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06C88
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06C8C
+
+#define mmTPC4_CFG_QM_SRF_0 0xF06C90
+
+#define mmTPC4_CFG_QM_SRF_1 0xF06C94
+
+#define mmTPC4_CFG_QM_SRF_2 0xF06C98
+
+#define mmTPC4_CFG_QM_SRF_3 0xF06C9C
+
+#define mmTPC4_CFG_QM_SRF_4 0xF06CA0
+
+#define mmTPC4_CFG_QM_SRF_5 0xF06CA4
+
+#define mmTPC4_CFG_QM_SRF_6 0xF06CA8
+
+#define mmTPC4_CFG_QM_SRF_7 0xF06CAC
+
+#define mmTPC4_CFG_QM_SRF_8 0xF06CB0
+
+#define mmTPC4_CFG_QM_SRF_9 0xF06CB4
+
+#define mmTPC4_CFG_QM_SRF_10 0xF06CB8
+
+#define mmTPC4_CFG_QM_SRF_11 0xF06CBC
+
+#define mmTPC4_CFG_QM_SRF_12 0xF06CC0
+
+#define mmTPC4_CFG_QM_SRF_13 0xF06CC4
+
+#define mmTPC4_CFG_QM_SRF_14 0xF06CC8
+
+#define mmTPC4_CFG_QM_SRF_15 0xF06CCC
+
+#define mmTPC4_CFG_QM_SRF_16 0xF06CD0
+
+#define mmTPC4_CFG_QM_SRF_17 0xF06CD4
+
+#define mmTPC4_CFG_QM_SRF_18 0xF06CD8
+
+#define mmTPC4_CFG_QM_SRF_19 0xF06CDC
+
+#define mmTPC4_CFG_QM_SRF_20 0xF06CE0
+
+#define mmTPC4_CFG_QM_SRF_21 0xF06CE4
+
+#define mmTPC4_CFG_QM_SRF_22 0xF06CE8
+
+#define mmTPC4_CFG_QM_SRF_23 0xF06CEC
+
+#define mmTPC4_CFG_QM_SRF_24 0xF06CF0
+
+#define mmTPC4_CFG_QM_SRF_25 0xF06CF4
+
+#define mmTPC4_CFG_QM_SRF_26 0xF06CF8
+
+#define mmTPC4_CFG_QM_SRF_27 0xF06CFC
+
+#define mmTPC4_CFG_QM_SRF_28 0xF06D00
+
+#define mmTPC4_CFG_QM_SRF_29 0xF06D04
+
+#define mmTPC4_CFG_QM_SRF_30 0xF06D08
+
+#define mmTPC4_CFG_QM_SRF_31 0xF06D0C
+
+#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06D10
+
+#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D14
+
+#define mmTPC4_CFG_ARUSER 0xF06D18
+
+#define mmTPC4_CFG_AWUSER 0xF06D1C
+
+#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF06E00
+
+#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF06E04
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF06E08
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF06E0C
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF06E10
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF06E14
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF06E18
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF06E1C
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF06E20
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF06E24
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF06E28
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF06E2C
+
+#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
new file mode 100644
index 000000000000..565b42885b0d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_CMDQ_REGS_H_
+#define ASIC_REG_TPC4_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC4_CMDQ_GLBL_CFG0 0xF09000
+
+#define mmTPC4_CMDQ_GLBL_CFG1 0xF09004
+
+#define mmTPC4_CMDQ_GLBL_PROT 0xF09008
+
+#define mmTPC4_CMDQ_GLBL_ERR_CFG 0xF0900C
+
+#define mmTPC4_CMDQ_GLBL_ERR_ADDR_LO 0xF09010
+
+#define mmTPC4_CMDQ_GLBL_ERR_ADDR_HI 0xF09014
+
+#define mmTPC4_CMDQ_GLBL_ERR_WDATA 0xF09018
+
+#define mmTPC4_CMDQ_GLBL_SECURE_PROPS 0xF0901C
+
+#define mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS 0xF09020
+
+#define mmTPC4_CMDQ_GLBL_STS0 0xF09024
+
+#define mmTPC4_CMDQ_GLBL_STS1 0xF09028
+
+#define mmTPC4_CMDQ_CQ_CFG0 0xF090B0
+
+#define mmTPC4_CMDQ_CQ_CFG1 0xF090B4
+
+#define mmTPC4_CMDQ_CQ_ARUSER 0xF090B8
+
+#define mmTPC4_CMDQ_CQ_PTR_LO 0xF090C0
+
+#define mmTPC4_CMDQ_CQ_PTR_HI 0xF090C4
+
+#define mmTPC4_CMDQ_CQ_TSIZE 0xF090C8
+
+#define mmTPC4_CMDQ_CQ_CTL 0xF090CC
+
+#define mmTPC4_CMDQ_CQ_PTR_LO_STS 0xF090D4
+
+#define mmTPC4_CMDQ_CQ_PTR_HI_STS 0xF090D8
+
+#define mmTPC4_CMDQ_CQ_TSIZE_STS 0xF090DC
+
+#define mmTPC4_CMDQ_CQ_CTL_STS 0xF090E0
+
+#define mmTPC4_CMDQ_CQ_STS0 0xF090E4
+
+#define mmTPC4_CMDQ_CQ_STS1 0xF090E8
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN 0xF090F0
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF090F4
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT 0xF090F8
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF090FC
+
+#define mmTPC4_CMDQ_CQ_IFIFO_CNT 0xF09108
+
+#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF09120
+
+#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF09124
+
+#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF09128
+
+#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF0912C
+
+#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF09130
+
+#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF09134
+
+#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF09138
+
+#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF0913C
+
+#define mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF09140
+
+#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF09144
+
+#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF09148
+
+#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF0914C
+
+#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF09150
+
+#define mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF09154
+
+#define mmTPC4_CMDQ_CP_FENCE0_RDATA 0xF09158
+
+#define mmTPC4_CMDQ_CP_FENCE1_RDATA 0xF0915C
+
+#define mmTPC4_CMDQ_CP_FENCE2_RDATA 0xF09160
+
+#define mmTPC4_CMDQ_CP_FENCE3_RDATA 0xF09164
+
+#define mmTPC4_CMDQ_CP_FENCE0_CNT 0xF09168
+
+#define mmTPC4_CMDQ_CP_FENCE1_CNT 0xF0916C
+
+#define mmTPC4_CMDQ_CP_FENCE2_CNT 0xF09170
+
+#define mmTPC4_CMDQ_CP_FENCE3_CNT 0xF09174
+
+#define mmTPC4_CMDQ_CP_STS 0xF09178
+
+#define mmTPC4_CMDQ_CP_CURRENT_INST_LO 0xF0917C
+
+#define mmTPC4_CMDQ_CP_CURRENT_INST_HI 0xF09180
+
+#define mmTPC4_CMDQ_CP_BARRIER_CFG 0xF09184
+
+#define mmTPC4_CMDQ_CP_DBG_0 0xF09188
+
+#define mmTPC4_CMDQ_CQ_BUF_ADDR 0xF09308
+
+#define mmTPC4_CMDQ_CQ_BUF_RDATA 0xF0930C
+
+#endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
new file mode 100644
index 000000000000..196da3f12710
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_QM_REGS_H_
+#define ASIC_REG_TPC4_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC4_QM_GLBL_CFG0 0xF08000
+
+#define mmTPC4_QM_GLBL_CFG1 0xF08004
+
+#define mmTPC4_QM_GLBL_PROT 0xF08008
+
+#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08010
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08014
+
+#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08018
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS 0xF0801C
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS 0xF08020
+
+#define mmTPC4_QM_GLBL_STS0 0xF08024
+
+#define mmTPC4_QM_GLBL_STS1 0xF08028
+
+#define mmTPC4_QM_PQ_BASE_LO 0xF08060
+
+#define mmTPC4_QM_PQ_BASE_HI 0xF08064
+
+#define mmTPC4_QM_PQ_SIZE 0xF08068
+
+#define mmTPC4_QM_PQ_PI 0xF0806C
+
+#define mmTPC4_QM_PQ_CI 0xF08070
+
+#define mmTPC4_QM_PQ_CFG0 0xF08074
+
+#define mmTPC4_QM_PQ_CFG1 0xF08078
+
+#define mmTPC4_QM_PQ_ARUSER 0xF0807C
+
+#define mmTPC4_QM_PQ_PUSH0 0xF08080
+
+#define mmTPC4_QM_PQ_PUSH1 0xF08084
+
+#define mmTPC4_QM_PQ_PUSH2 0xF08088
+
+#define mmTPC4_QM_PQ_PUSH3 0xF0808C
+
+#define mmTPC4_QM_PQ_STS0 0xF08090
+
+#define mmTPC4_QM_PQ_STS1 0xF08094
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_EN 0xF080A0
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF080A4
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_SAT 0xF080A8
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_TOUT 0xF080AC
+
+#define mmTPC4_QM_CQ_CFG0 0xF080B0
+
+#define mmTPC4_QM_CQ_CFG1 0xF080B4
+
+#define mmTPC4_QM_CQ_ARUSER 0xF080B8
+
+#define mmTPC4_QM_CQ_PTR_LO 0xF080C0
+
+#define mmTPC4_QM_CQ_PTR_HI 0xF080C4
+
+#define mmTPC4_QM_CQ_TSIZE 0xF080C8
+
+#define mmTPC4_QM_CQ_CTL 0xF080CC
+
+#define mmTPC4_QM_CQ_PTR_LO_STS 0xF080D4
+
+#define mmTPC4_QM_CQ_PTR_HI_STS 0xF080D8
+
+#define mmTPC4_QM_CQ_TSIZE_STS 0xF080DC
+
+#define mmTPC4_QM_CQ_CTL_STS 0xF080E0
+
+#define mmTPC4_QM_CQ_STS0 0xF080E4
+
+#define mmTPC4_QM_CQ_STS1 0xF080E8
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_EN 0xF080F0
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF080F4
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_SAT 0xF080F8
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_TOUT 0xF080FC
+
+#define mmTPC4_QM_CQ_IFIFO_CNT 0xF08108
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO 0xF08120
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI 0xF08124
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO 0xF08128
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI 0xF0812C
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO 0xF08130
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI 0xF08134
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO 0xF08138
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI 0xF0813C
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET 0xF08140
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF08144
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF08148
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF0814C
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF08150
+
+#define mmTPC4_QM_CP_LDMA_COMMIT_OFFSET 0xF08154
+
+#define mmTPC4_QM_CP_FENCE0_RDATA 0xF08158
+
+#define mmTPC4_QM_CP_FENCE1_RDATA 0xF0815C
+
+#define mmTPC4_QM_CP_FENCE2_RDATA 0xF08160
+
+#define mmTPC4_QM_CP_FENCE3_RDATA 0xF08164
+
+#define mmTPC4_QM_CP_FENCE0_CNT 0xF08168
+
+#define mmTPC4_QM_CP_FENCE1_CNT 0xF0816C
+
+#define mmTPC4_QM_CP_FENCE2_CNT 0xF08170
+
+#define mmTPC4_QM_CP_FENCE3_CNT 0xF08174
+
+#define mmTPC4_QM_CP_STS 0xF08178
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO 0xF0817C
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI 0xF08180
+
+#define mmTPC4_QM_CP_BARRIER_CFG 0xF08184
+
+#define mmTPC4_QM_CP_DBG_0 0xF08188
+
+#define mmTPC4_QM_PQ_BUF_ADDR 0xF08300
+
+#define mmTPC4_QM_PQ_BUF_RDATA 0xF08304
+
+#define mmTPC4_QM_CQ_BUF_ADDR 0xF08308
+
+#define mmTPC4_QM_CQ_BUF_RDATA 0xF0830C
+
+#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
new file mode 100644
index 000000000000..8b54041d144a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_RTR_REGS_H_
+#define ASIC_REG_TPC4_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC4_RTR_HBW_RD_RQ_E_ARB 0xF00100
+
+#define mmTPC4_RTR_HBW_RD_RQ_W_ARB 0xF00104
+
+#define mmTPC4_RTR_HBW_RD_RQ_N_ARB 0xF00108
+
+#define mmTPC4_RTR_HBW_RD_RQ_S_ARB 0xF0010C
+
+#define mmTPC4_RTR_HBW_RD_RQ_L_ARB 0xF00110
+
+#define mmTPC4_RTR_HBW_E_ARB_MAX 0xF00120
+
+#define mmTPC4_RTR_HBW_W_ARB_MAX 0xF00124
+
+#define mmTPC4_RTR_HBW_N_ARB_MAX 0xF00128
+
+#define mmTPC4_RTR_HBW_S_ARB_MAX 0xF0012C
+
+#define mmTPC4_RTR_HBW_L_ARB_MAX 0xF00130
+
+#define mmTPC4_RTR_HBW_RD_RS_E_ARB 0xF00140
+
+#define mmTPC4_RTR_HBW_RD_RS_W_ARB 0xF00144
+
+#define mmTPC4_RTR_HBW_RD_RS_N_ARB 0xF00148
+
+#define mmTPC4_RTR_HBW_RD_RS_S_ARB 0xF0014C
+
+#define mmTPC4_RTR_HBW_RD_RS_L_ARB 0xF00150
+
+#define mmTPC4_RTR_HBW_WR_RQ_E_ARB 0xF00170
+
+#define mmTPC4_RTR_HBW_WR_RQ_W_ARB 0xF00174
+
+#define mmTPC4_RTR_HBW_WR_RQ_N_ARB 0xF00178
+
+#define mmTPC4_RTR_HBW_WR_RQ_S_ARB 0xF0017C
+
+#define mmTPC4_RTR_HBW_WR_RQ_L_ARB 0xF00180
+
+#define mmTPC4_RTR_HBW_WR_RS_E_ARB 0xF00190
+
+#define mmTPC4_RTR_HBW_WR_RS_W_ARB 0xF00194
+
+#define mmTPC4_RTR_HBW_WR_RS_N_ARB 0xF00198
+
+#define mmTPC4_RTR_HBW_WR_RS_S_ARB 0xF0019C
+
+#define mmTPC4_RTR_HBW_WR_RS_L_ARB 0xF001A0
+
+#define mmTPC4_RTR_LBW_RD_RQ_E_ARB 0xF00200
+
+#define mmTPC4_RTR_LBW_RD_RQ_W_ARB 0xF00204
+
+#define mmTPC4_RTR_LBW_RD_RQ_N_ARB 0xF00208
+
+#define mmTPC4_RTR_LBW_RD_RQ_S_ARB 0xF0020C
+
+#define mmTPC4_RTR_LBW_RD_RQ_L_ARB 0xF00210
+
+#define mmTPC4_RTR_LBW_E_ARB_MAX 0xF00220
+
+#define mmTPC4_RTR_LBW_W_ARB_MAX 0xF00224
+
+#define mmTPC4_RTR_LBW_N_ARB_MAX 0xF00228
+
+#define mmTPC4_RTR_LBW_S_ARB_MAX 0xF0022C
+
+#define mmTPC4_RTR_LBW_L_ARB_MAX 0xF00230
+
+#define mmTPC4_RTR_LBW_RD_RS_E_ARB 0xF00250
+
+#define mmTPC4_RTR_LBW_RD_RS_W_ARB 0xF00254
+
+#define mmTPC4_RTR_LBW_RD_RS_N_ARB 0xF00258
+
+#define mmTPC4_RTR_LBW_RD_RS_S_ARB 0xF0025C
+
+#define mmTPC4_RTR_LBW_RD_RS_L_ARB 0xF00260
+
+#define mmTPC4_RTR_LBW_WR_RQ_E_ARB 0xF00270
+
+#define mmTPC4_RTR_LBW_WR_RQ_W_ARB 0xF00274
+
+#define mmTPC4_RTR_LBW_WR_RQ_N_ARB 0xF00278
+
+#define mmTPC4_RTR_LBW_WR_RQ_S_ARB 0xF0027C
+
+#define mmTPC4_RTR_LBW_WR_RQ_L_ARB 0xF00280
+
+#define mmTPC4_RTR_LBW_WR_RS_E_ARB 0xF00290
+
+#define mmTPC4_RTR_LBW_WR_RS_W_ARB 0xF00294
+
+#define mmTPC4_RTR_LBW_WR_RS_N_ARB 0xF00298
+
+#define mmTPC4_RTR_LBW_WR_RS_S_ARB 0xF0029C
+
+#define mmTPC4_RTR_LBW_WR_RS_L_ARB 0xF002A0
+
+#define mmTPC4_RTR_DBG_E_ARB 0xF00300
+
+#define mmTPC4_RTR_DBG_W_ARB 0xF00304
+
+#define mmTPC4_RTR_DBG_N_ARB 0xF00308
+
+#define mmTPC4_RTR_DBG_S_ARB 0xF0030C
+
+#define mmTPC4_RTR_DBG_L_ARB 0xF00310
+
+#define mmTPC4_RTR_DBG_E_ARB_MAX 0xF00320
+
+#define mmTPC4_RTR_DBG_W_ARB_MAX 0xF00324
+
+#define mmTPC4_RTR_DBG_N_ARB_MAX 0xF00328
+
+#define mmTPC4_RTR_DBG_S_ARB_MAX 0xF0032C
+
+#define mmTPC4_RTR_DBG_L_ARB_MAX 0xF00330
+
+#define mmTPC4_RTR_SPLIT_COEF_0 0xF00400
+
+#define mmTPC4_RTR_SPLIT_COEF_1 0xF00404
+
+#define mmTPC4_RTR_SPLIT_COEF_2 0xF00408
+
+#define mmTPC4_RTR_SPLIT_COEF_3 0xF0040C
+
+#define mmTPC4_RTR_SPLIT_COEF_4 0xF00410
+
+#define mmTPC4_RTR_SPLIT_COEF_5 0xF00414
+
+#define mmTPC4_RTR_SPLIT_COEF_6 0xF00418
+
+#define mmTPC4_RTR_SPLIT_COEF_7 0xF0041C
+
+#define mmTPC4_RTR_SPLIT_COEF_8 0xF00420
+
+#define mmTPC4_RTR_SPLIT_COEF_9 0xF00424
+
+#define mmTPC4_RTR_SPLIT_CFG 0xF00440
+
+#define mmTPC4_RTR_SPLIT_RD_SAT 0xF00444
+
+#define mmTPC4_RTR_SPLIT_RD_RST_TOKEN 0xF00448
+
+#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_0 0xF0044C
+
+#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_1 0xF00450
+
+#define mmTPC4_RTR_SPLIT_WR_SAT 0xF00454
+
+#define mmTPC4_RTR_WPLIT_WR_TST_TOLEN 0xF00458
+
+#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_0 0xF0045C
+
+#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_1 0xF00460
+
+#define mmTPC4_RTR_HBW_RANGE_HIT 0xF00470
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_0 0xF00480
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_1 0xF00484
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_2 0xF00488
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_3 0xF0048C
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_4 0xF00490
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_5 0xF00494
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_6 0xF00498
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_7 0xF0049C
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_0 0xF004A0
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_1 0xF004A4
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_2 0xF004A8
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_3 0xF004AC
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_4 0xF004B0
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_5 0xF004B4
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_6 0xF004B8
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_7 0xF004BC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_0 0xF004C0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_1 0xF004C4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_2 0xF004C8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_3 0xF004CC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_4 0xF004D0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_5 0xF004D4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_6 0xF004D8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_7 0xF004DC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_0 0xF004E0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_1 0xF004E4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_2 0xF004E8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_3 0xF004EC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_4 0xF004F0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_5 0xF004F4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_6 0xF004F8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_7 0xF004FC
+
+#define mmTPC4_RTR_LBW_RANGE_HIT 0xF00500
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_0 0xF00510
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_1 0xF00514
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_2 0xF00518
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_3 0xF0051C
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_4 0xF00520
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_5 0xF00524
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_6 0xF00528
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_7 0xF0052C
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_8 0xF00530
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_9 0xF00534
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_10 0xF00538
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_11 0xF0053C
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_12 0xF00540
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_13 0xF00544
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_14 0xF00548
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_15 0xF0054C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_0 0xF00550
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_1 0xF00554
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_2 0xF00558
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_3 0xF0055C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_4 0xF00560
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_5 0xF00564
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_6 0xF00568
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_7 0xF0056C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_8 0xF00570
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_9 0xF00574
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_10 0xF00578
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_11 0xF0057C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_12 0xF00580
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_13 0xF00584
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_14 0xF00588
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_15 0xF0058C
+
+#define mmTPC4_RTR_RGLTR 0xF00590
+
+#define mmTPC4_RTR_RGLTR_WR_RESULT 0xF00594
+
+#define mmTPC4_RTR_RGLTR_RD_RESULT 0xF00598
+
+#define mmTPC4_RTR_SCRAMB_EN 0xF00600
+
+#define mmTPC4_RTR_NON_LIN_SCRAMB 0xF00604
+
+#endif /* ASIC_REG_TPC4_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
new file mode 100644
index 000000000000..3f00954fcdba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_CFG_REGS_H_
+#define ASIC_REG_TPC5_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF46418
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF4641C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF46420
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF46424
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46428
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF4642C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF46430
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46434
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF46438
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF4643C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46440
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46444
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF46448
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF4644C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF46450
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46454
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46458
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF4645C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF46460
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF46464
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46468
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF4646C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF46470
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46474
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF46478
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF4647C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46480
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46484
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF46488
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF4648C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF46490
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF46494
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46498
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF4649C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF464A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF464A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF464A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF464AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF464B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF464B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF464B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF464BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF464C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF464C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF464C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF464CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF464D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF464D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF464E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF464FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF46500
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF46504
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF46508
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF4650C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF46510
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF46514
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF46518
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF4651C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF46520
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF46524
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF46528
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF4652C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF46530
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF46534
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF46538
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF4653C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF46540
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF46544
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF46548
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF4654C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF46550
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF46554
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46558
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF4655C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF46560
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46564
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF46568
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF4656C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46570
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46574
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF46578
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF4657C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF46580
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46584
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46588
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF4658C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF46590
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF46594
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46598
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF4659C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF465A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF465A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF465A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF465AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF465B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF465B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF465B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF465BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF465C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF465C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF465C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF465CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF465D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF465D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF465D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF465DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF465E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF465E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF465E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF465EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF465F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF465F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF465F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF465FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF46600
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF46604
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46608
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF4660C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF46610
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46614
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF46618
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF4661C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46620
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46624
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF46628
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF4662C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF46630
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF46634
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF46638
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF4663C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF46640
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF46644
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF46648
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF4664C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF46650
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF46654
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF46658
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF4665C
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46660
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF46664
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46668
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF4666C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46670
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF46674
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF46678
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF4667C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF46680
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF46684
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF46688
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF4668C
+
+#define mmTPC5_CFG_KERNEL_SRF_0 0xF46690
+
+#define mmTPC5_CFG_KERNEL_SRF_1 0xF46694
+
+#define mmTPC5_CFG_KERNEL_SRF_2 0xF46698
+
+#define mmTPC5_CFG_KERNEL_SRF_3 0xF4669C
+
+#define mmTPC5_CFG_KERNEL_SRF_4 0xF466A0
+
+#define mmTPC5_CFG_KERNEL_SRF_5 0xF466A4
+
+#define mmTPC5_CFG_KERNEL_SRF_6 0xF466A8
+
+#define mmTPC5_CFG_KERNEL_SRF_7 0xF466AC
+
+#define mmTPC5_CFG_KERNEL_SRF_8 0xF466B0
+
+#define mmTPC5_CFG_KERNEL_SRF_9 0xF466B4
+
+#define mmTPC5_CFG_KERNEL_SRF_10 0xF466B8
+
+#define mmTPC5_CFG_KERNEL_SRF_11 0xF466BC
+
+#define mmTPC5_CFG_KERNEL_SRF_12 0xF466C0
+
+#define mmTPC5_CFG_KERNEL_SRF_13 0xF466C4
+
+#define mmTPC5_CFG_KERNEL_SRF_14 0xF466C8
+
+#define mmTPC5_CFG_KERNEL_SRF_15 0xF466CC
+
+#define mmTPC5_CFG_KERNEL_SRF_16 0xF466D0
+
+#define mmTPC5_CFG_KERNEL_SRF_17 0xF466D4
+
+#define mmTPC5_CFG_KERNEL_SRF_18 0xF466D8
+
+#define mmTPC5_CFG_KERNEL_SRF_19 0xF466DC
+
+#define mmTPC5_CFG_KERNEL_SRF_20 0xF466E0
+
+#define mmTPC5_CFG_KERNEL_SRF_21 0xF466E4
+
+#define mmTPC5_CFG_KERNEL_SRF_22 0xF466E8
+
+#define mmTPC5_CFG_KERNEL_SRF_23 0xF466EC
+
+#define mmTPC5_CFG_KERNEL_SRF_24 0xF466F0
+
+#define mmTPC5_CFG_KERNEL_SRF_25 0xF466F4
+
+#define mmTPC5_CFG_KERNEL_SRF_26 0xF466F8
+
+#define mmTPC5_CFG_KERNEL_SRF_27 0xF466FC
+
+#define mmTPC5_CFG_KERNEL_SRF_28 0xF46700
+
+#define mmTPC5_CFG_KERNEL_SRF_29 0xF46704
+
+#define mmTPC5_CFG_KERNEL_SRF_30 0xF46708
+
+#define mmTPC5_CFG_KERNEL_SRF_31 0xF4670C
+
+#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF46710
+
+#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46714
+
+#define mmTPC5_CFG_RESERVED_DESC_END 0xF46738
+
+#define mmTPC5_CFG_ROUND_CSR 0xF467FC
+
+#define mmTPC5_CFG_TBUF_BASE_ADDR_LOW 0xF46800
+
+#define mmTPC5_CFG_TBUF_BASE_ADDR_HIGH 0xF46804
+
+#define mmTPC5_CFG_SEMAPHORE 0xF46808
+
+#define mmTPC5_CFG_VFLAGS 0xF4680C
+
+#define mmTPC5_CFG_SFLAGS 0xF46810
+
+#define mmTPC5_CFG_LFSR_POLYNOM 0xF46818
+
+#define mmTPC5_CFG_STATUS 0xF4681C
+
+#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46820
+
+#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46824
+
+#define mmTPC5_CFG_SM_BASE_ADDRESS_LOW 0xF46828
+
+#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4682C
+
+#define mmTPC5_CFG_TPC_CMD 0xF46830
+
+#define mmTPC5_CFG_TPC_EXECUTE 0xF46838
+
+#define mmTPC5_CFG_TPC_STALL 0xF4683C
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46840
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46844
+
+#define mmTPC5_CFG_MSS_CONFIG 0xF46854
+
+#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46858
+
+#define mmTPC5_CFG_TPC_INTR_MASK 0xF4685C
+
+#define mmTPC5_CFG_TSB_CONFIG 0xF46860
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04
+
+#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08
+
+#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF46A18
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A1C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A20
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF46A24
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A28
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A2C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF46A30
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A34
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A38
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF46A3C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A40
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A44
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF46A48
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A4C
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A50
+
+#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A54
+
+#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A58
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A5C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A60
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF46A64
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A68
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A6C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF46A70
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A74
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A78
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF46A7C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A80
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A84
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF46A88
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A8C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A90
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF46A94
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A98
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A9C
+
+#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46AA0
+
+#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46AA4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46AA8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46AAC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF46AB0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46AB4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46AB8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF46ABC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46AC0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46AC4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF46AC8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46ACC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46AD0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF46AD4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AD8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46ADC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF46AE0
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AE4
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF46AE8
+
+#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AEC
+
+#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AF0
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AF4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46AF8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF46AFC
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46B00
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46B04
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF46B08
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46B0C
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46B10
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF46B14
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46B18
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46B1C
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF46B20
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46B24
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46B28
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF46B2C
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46B30
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46B34
+
+#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46B38
+
+#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46B3C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46B40
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46B44
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF46B48
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46B4C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46B50
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF46B54
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B58
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B5C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF46B60
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B64
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B68
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF46B6C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B70
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B74
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF46B78
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B7C
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B80
+
+#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B84
+
+#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B88
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B8C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B90
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF46B94
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B98
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B9C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF46BA0
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46BA4
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46BA8
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF46BAC
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46BB0
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46BB4
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF46BB8
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46BBC
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46BC0
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF46BC4
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46BC8
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46BCC
+
+#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46BD0
+
+#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46BD4
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46BD8
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46BDC
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF46BE0
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46BE4
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46BE8
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF46BEC
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46BF0
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46BF4
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF46BF8
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46BFC
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46C00
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF46C04
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46C08
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46C0C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF46C10
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46C14
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46C18
+
+#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46C1C
+
+#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46C20
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46C24
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46C28
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF46C2C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46C30
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46C34
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF46C38
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46C3C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46C40
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF46C44
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46C48
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46C4C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF46C50
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46C54
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46C58
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF46C5C
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46C60
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46C64
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46C68
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46C6C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46C70
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46C74
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46C78
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 0xF46C7C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46C80
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46C84
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46C88
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46C8C
+
+#define mmTPC5_CFG_QM_SRF_0 0xF46C90
+
+#define mmTPC5_CFG_QM_SRF_1 0xF46C94
+
+#define mmTPC5_CFG_QM_SRF_2 0xF46C98
+
+#define mmTPC5_CFG_QM_SRF_3 0xF46C9C
+
+#define mmTPC5_CFG_QM_SRF_4 0xF46CA0
+
+#define mmTPC5_CFG_QM_SRF_5 0xF46CA4
+
+#define mmTPC5_CFG_QM_SRF_6 0xF46CA8
+
+#define mmTPC5_CFG_QM_SRF_7 0xF46CAC
+
+#define mmTPC5_CFG_QM_SRF_8 0xF46CB0
+
+#define mmTPC5_CFG_QM_SRF_9 0xF46CB4
+
+#define mmTPC5_CFG_QM_SRF_10 0xF46CB8
+
+#define mmTPC5_CFG_QM_SRF_11 0xF46CBC
+
+#define mmTPC5_CFG_QM_SRF_12 0xF46CC0
+
+#define mmTPC5_CFG_QM_SRF_13 0xF46CC4
+
+#define mmTPC5_CFG_QM_SRF_14 0xF46CC8
+
+#define mmTPC5_CFG_QM_SRF_15 0xF46CCC
+
+#define mmTPC5_CFG_QM_SRF_16 0xF46CD0
+
+#define mmTPC5_CFG_QM_SRF_17 0xF46CD4
+
+#define mmTPC5_CFG_QM_SRF_18 0xF46CD8
+
+#define mmTPC5_CFG_QM_SRF_19 0xF46CDC
+
+#define mmTPC5_CFG_QM_SRF_20 0xF46CE0
+
+#define mmTPC5_CFG_QM_SRF_21 0xF46CE4
+
+#define mmTPC5_CFG_QM_SRF_22 0xF46CE8
+
+#define mmTPC5_CFG_QM_SRF_23 0xF46CEC
+
+#define mmTPC5_CFG_QM_SRF_24 0xF46CF0
+
+#define mmTPC5_CFG_QM_SRF_25 0xF46CF4
+
+#define mmTPC5_CFG_QM_SRF_26 0xF46CF8
+
+#define mmTPC5_CFG_QM_SRF_27 0xF46CFC
+
+#define mmTPC5_CFG_QM_SRF_28 0xF46D00
+
+#define mmTPC5_CFG_QM_SRF_29 0xF46D04
+
+#define mmTPC5_CFG_QM_SRF_30 0xF46D08
+
+#define mmTPC5_CFG_QM_SRF_31 0xF46D0C
+
+#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46D10
+
+#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D14
+
+#define mmTPC5_CFG_ARUSER 0xF46D18
+
+#define mmTPC5_CFG_AWUSER 0xF46D1C
+
+#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF46E00
+
+#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF46E04
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF46E08
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF46E0C
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF46E10
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF46E14
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF46E18
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF46E1C
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF46E20
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF46E24
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF46E28
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF46E2C
+
+#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
new file mode 100644
index 000000000000..d8e72a8e18d7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_CMDQ_REGS_H_
+#define ASIC_REG_TPC5_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC5_CMDQ_GLBL_CFG0 0xF49000
+
+#define mmTPC5_CMDQ_GLBL_CFG1 0xF49004
+
+#define mmTPC5_CMDQ_GLBL_PROT 0xF49008
+
+#define mmTPC5_CMDQ_GLBL_ERR_CFG 0xF4900C
+
+#define mmTPC5_CMDQ_GLBL_ERR_ADDR_LO 0xF49010
+
+#define mmTPC5_CMDQ_GLBL_ERR_ADDR_HI 0xF49014
+
+#define mmTPC5_CMDQ_GLBL_ERR_WDATA 0xF49018
+
+#define mmTPC5_CMDQ_GLBL_SECURE_PROPS 0xF4901C
+
+#define mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS 0xF49020
+
+#define mmTPC5_CMDQ_GLBL_STS0 0xF49024
+
+#define mmTPC5_CMDQ_GLBL_STS1 0xF49028
+
+#define mmTPC5_CMDQ_CQ_CFG0 0xF490B0
+
+#define mmTPC5_CMDQ_CQ_CFG1 0xF490B4
+
+#define mmTPC5_CMDQ_CQ_ARUSER 0xF490B8
+
+#define mmTPC5_CMDQ_CQ_PTR_LO 0xF490C0
+
+#define mmTPC5_CMDQ_CQ_PTR_HI 0xF490C4
+
+#define mmTPC5_CMDQ_CQ_TSIZE 0xF490C8
+
+#define mmTPC5_CMDQ_CQ_CTL 0xF490CC
+
+#define mmTPC5_CMDQ_CQ_PTR_LO_STS 0xF490D4
+
+#define mmTPC5_CMDQ_CQ_PTR_HI_STS 0xF490D8
+
+#define mmTPC5_CMDQ_CQ_TSIZE_STS 0xF490DC
+
+#define mmTPC5_CMDQ_CQ_CTL_STS 0xF490E0
+
+#define mmTPC5_CMDQ_CQ_STS0 0xF490E4
+
+#define mmTPC5_CMDQ_CQ_STS1 0xF490E8
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN 0xF490F0
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF490F4
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT 0xF490F8
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF490FC
+
+#define mmTPC5_CMDQ_CQ_IFIFO_CNT 0xF49108
+
+#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF49120
+
+#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF49124
+
+#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF49128
+
+#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF4912C
+
+#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF49130
+
+#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF49134
+
+#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF49138
+
+#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF4913C
+
+#define mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF49140
+
+#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF49144
+
+#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF49148
+
+#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF4914C
+
+#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF49150
+
+#define mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF49154
+
+#define mmTPC5_CMDQ_CP_FENCE0_RDATA 0xF49158
+
+#define mmTPC5_CMDQ_CP_FENCE1_RDATA 0xF4915C
+
+#define mmTPC5_CMDQ_CP_FENCE2_RDATA 0xF49160
+
+#define mmTPC5_CMDQ_CP_FENCE3_RDATA 0xF49164
+
+#define mmTPC5_CMDQ_CP_FENCE0_CNT 0xF49168
+
+#define mmTPC5_CMDQ_CP_FENCE1_CNT 0xF4916C
+
+#define mmTPC5_CMDQ_CP_FENCE2_CNT 0xF49170
+
+#define mmTPC5_CMDQ_CP_FENCE3_CNT 0xF49174
+
+#define mmTPC5_CMDQ_CP_STS 0xF49178
+
+#define mmTPC5_CMDQ_CP_CURRENT_INST_LO 0xF4917C
+
+#define mmTPC5_CMDQ_CP_CURRENT_INST_HI 0xF49180
+
+#define mmTPC5_CMDQ_CP_BARRIER_CFG 0xF49184
+
+#define mmTPC5_CMDQ_CP_DBG_0 0xF49188
+
+#define mmTPC5_CMDQ_CQ_BUF_ADDR 0xF49308
+
+#define mmTPC5_CMDQ_CQ_BUF_RDATA 0xF4930C
+
+#endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
new file mode 100644
index 000000000000..be2e68624709
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_QM_REGS_H_
+#define ASIC_REG_TPC5_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC5_QM_GLBL_CFG0 0xF48000
+
+#define mmTPC5_QM_GLBL_CFG1 0xF48004
+
+#define mmTPC5_QM_GLBL_PROT 0xF48008
+
+#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48010
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48014
+
+#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48018
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS 0xF4801C
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS 0xF48020
+
+#define mmTPC5_QM_GLBL_STS0 0xF48024
+
+#define mmTPC5_QM_GLBL_STS1 0xF48028
+
+#define mmTPC5_QM_PQ_BASE_LO 0xF48060
+
+#define mmTPC5_QM_PQ_BASE_HI 0xF48064
+
+#define mmTPC5_QM_PQ_SIZE 0xF48068
+
+#define mmTPC5_QM_PQ_PI 0xF4806C
+
+#define mmTPC5_QM_PQ_CI 0xF48070
+
+#define mmTPC5_QM_PQ_CFG0 0xF48074
+
+#define mmTPC5_QM_PQ_CFG1 0xF48078
+
+#define mmTPC5_QM_PQ_ARUSER 0xF4807C
+
+#define mmTPC5_QM_PQ_PUSH0 0xF48080
+
+#define mmTPC5_QM_PQ_PUSH1 0xF48084
+
+#define mmTPC5_QM_PQ_PUSH2 0xF48088
+
+#define mmTPC5_QM_PQ_PUSH3 0xF4808C
+
+#define mmTPC5_QM_PQ_STS0 0xF48090
+
+#define mmTPC5_QM_PQ_STS1 0xF48094
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_EN 0xF480A0
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF480A4
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_SAT 0xF480A8
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_TOUT 0xF480AC
+
+#define mmTPC5_QM_CQ_CFG0 0xF480B0
+
+#define mmTPC5_QM_CQ_CFG1 0xF480B4
+
+#define mmTPC5_QM_CQ_ARUSER 0xF480B8
+
+#define mmTPC5_QM_CQ_PTR_LO 0xF480C0
+
+#define mmTPC5_QM_CQ_PTR_HI 0xF480C4
+
+#define mmTPC5_QM_CQ_TSIZE 0xF480C8
+
+#define mmTPC5_QM_CQ_CTL 0xF480CC
+
+#define mmTPC5_QM_CQ_PTR_LO_STS 0xF480D4
+
+#define mmTPC5_QM_CQ_PTR_HI_STS 0xF480D8
+
+#define mmTPC5_QM_CQ_TSIZE_STS 0xF480DC
+
+#define mmTPC5_QM_CQ_CTL_STS 0xF480E0
+
+#define mmTPC5_QM_CQ_STS0 0xF480E4
+
+#define mmTPC5_QM_CQ_STS1 0xF480E8
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_EN 0xF480F0
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF480F4
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_SAT 0xF480F8
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_TOUT 0xF480FC
+
+#define mmTPC5_QM_CQ_IFIFO_CNT 0xF48108
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO 0xF48120
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI 0xF48124
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO 0xF48128
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI 0xF4812C
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO 0xF48130
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI 0xF48134
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO 0xF48138
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI 0xF4813C
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET 0xF48140
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF48144
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF48148
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF4814C
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF48150
+
+#define mmTPC5_QM_CP_LDMA_COMMIT_OFFSET 0xF48154
+
+#define mmTPC5_QM_CP_FENCE0_RDATA 0xF48158
+
+#define mmTPC5_QM_CP_FENCE1_RDATA 0xF4815C
+
+#define mmTPC5_QM_CP_FENCE2_RDATA 0xF48160
+
+#define mmTPC5_QM_CP_FENCE3_RDATA 0xF48164
+
+#define mmTPC5_QM_CP_FENCE0_CNT 0xF48168
+
+#define mmTPC5_QM_CP_FENCE1_CNT 0xF4816C
+
+#define mmTPC5_QM_CP_FENCE2_CNT 0xF48170
+
+#define mmTPC5_QM_CP_FENCE3_CNT 0xF48174
+
+#define mmTPC5_QM_CP_STS 0xF48178
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO 0xF4817C
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI 0xF48180
+
+#define mmTPC5_QM_CP_BARRIER_CFG 0xF48184
+
+#define mmTPC5_QM_CP_DBG_0 0xF48188
+
+#define mmTPC5_QM_PQ_BUF_ADDR 0xF48300
+
+#define mmTPC5_QM_PQ_BUF_RDATA 0xF48304
+
+#define mmTPC5_QM_CQ_BUF_ADDR 0xF48308
+
+#define mmTPC5_QM_CQ_BUF_RDATA 0xF4830C
+
+#endif /* ASIC_REG_TPC5_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
new file mode 100644
index 000000000000..6f301c7bbc2f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_RTR_REGS_H_
+#define ASIC_REG_TPC5_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC5_RTR_HBW_RD_RQ_E_ARB 0xF40100
+
+#define mmTPC5_RTR_HBW_RD_RQ_W_ARB 0xF40104
+
+#define mmTPC5_RTR_HBW_RD_RQ_N_ARB 0xF40108
+
+#define mmTPC5_RTR_HBW_RD_RQ_S_ARB 0xF4010C
+
+#define mmTPC5_RTR_HBW_RD_RQ_L_ARB 0xF40110
+
+#define mmTPC5_RTR_HBW_E_ARB_MAX 0xF40120
+
+#define mmTPC5_RTR_HBW_W_ARB_MAX 0xF40124
+
+#define mmTPC5_RTR_HBW_N_ARB_MAX 0xF40128
+
+#define mmTPC5_RTR_HBW_S_ARB_MAX 0xF4012C
+
+#define mmTPC5_RTR_HBW_L_ARB_MAX 0xF40130
+
+#define mmTPC5_RTR_HBW_RD_RS_E_ARB 0xF40140
+
+#define mmTPC5_RTR_HBW_RD_RS_W_ARB 0xF40144
+
+#define mmTPC5_RTR_HBW_RD_RS_N_ARB 0xF40148
+
+#define mmTPC5_RTR_HBW_RD_RS_S_ARB 0xF4014C
+
+#define mmTPC5_RTR_HBW_RD_RS_L_ARB 0xF40150
+
+#define mmTPC5_RTR_HBW_WR_RQ_E_ARB 0xF40170
+
+#define mmTPC5_RTR_HBW_WR_RQ_W_ARB 0xF40174
+
+#define mmTPC5_RTR_HBW_WR_RQ_N_ARB 0xF40178
+
+#define mmTPC5_RTR_HBW_WR_RQ_S_ARB 0xF4017C
+
+#define mmTPC5_RTR_HBW_WR_RQ_L_ARB 0xF40180
+
+#define mmTPC5_RTR_HBW_WR_RS_E_ARB 0xF40190
+
+#define mmTPC5_RTR_HBW_WR_RS_W_ARB 0xF40194
+
+#define mmTPC5_RTR_HBW_WR_RS_N_ARB 0xF40198
+
+#define mmTPC5_RTR_HBW_WR_RS_S_ARB 0xF4019C
+
+#define mmTPC5_RTR_HBW_WR_RS_L_ARB 0xF401A0
+
+#define mmTPC5_RTR_LBW_RD_RQ_E_ARB 0xF40200
+
+#define mmTPC5_RTR_LBW_RD_RQ_W_ARB 0xF40204
+
+#define mmTPC5_RTR_LBW_RD_RQ_N_ARB 0xF40208
+
+#define mmTPC5_RTR_LBW_RD_RQ_S_ARB 0xF4020C
+
+#define mmTPC5_RTR_LBW_RD_RQ_L_ARB 0xF40210
+
+#define mmTPC5_RTR_LBW_E_ARB_MAX 0xF40220
+
+#define mmTPC5_RTR_LBW_W_ARB_MAX 0xF40224
+
+#define mmTPC5_RTR_LBW_N_ARB_MAX 0xF40228
+
+#define mmTPC5_RTR_LBW_S_ARB_MAX 0xF4022C
+
+#define mmTPC5_RTR_LBW_L_ARB_MAX 0xF40230
+
+#define mmTPC5_RTR_LBW_RD_RS_E_ARB 0xF40250
+
+#define mmTPC5_RTR_LBW_RD_RS_W_ARB 0xF40254
+
+#define mmTPC5_RTR_LBW_RD_RS_N_ARB 0xF40258
+
+#define mmTPC5_RTR_LBW_RD_RS_S_ARB 0xF4025C
+
+#define mmTPC5_RTR_LBW_RD_RS_L_ARB 0xF40260
+
+#define mmTPC5_RTR_LBW_WR_RQ_E_ARB 0xF40270
+
+#define mmTPC5_RTR_LBW_WR_RQ_W_ARB 0xF40274
+
+#define mmTPC5_RTR_LBW_WR_RQ_N_ARB 0xF40278
+
+#define mmTPC5_RTR_LBW_WR_RQ_S_ARB 0xF4027C
+
+#define mmTPC5_RTR_LBW_WR_RQ_L_ARB 0xF40280
+
+#define mmTPC5_RTR_LBW_WR_RS_E_ARB 0xF40290
+
+#define mmTPC5_RTR_LBW_WR_RS_W_ARB 0xF40294
+
+#define mmTPC5_RTR_LBW_WR_RS_N_ARB 0xF40298
+
+#define mmTPC5_RTR_LBW_WR_RS_S_ARB 0xF4029C
+
+#define mmTPC5_RTR_LBW_WR_RS_L_ARB 0xF402A0
+
+#define mmTPC5_RTR_DBG_E_ARB 0xF40300
+
+#define mmTPC5_RTR_DBG_W_ARB 0xF40304
+
+#define mmTPC5_RTR_DBG_N_ARB 0xF40308
+
+#define mmTPC5_RTR_DBG_S_ARB 0xF4030C
+
+#define mmTPC5_RTR_DBG_L_ARB 0xF40310
+
+#define mmTPC5_RTR_DBG_E_ARB_MAX 0xF40320
+
+#define mmTPC5_RTR_DBG_W_ARB_MAX 0xF40324
+
+#define mmTPC5_RTR_DBG_N_ARB_MAX 0xF40328
+
+#define mmTPC5_RTR_DBG_S_ARB_MAX 0xF4032C
+
+#define mmTPC5_RTR_DBG_L_ARB_MAX 0xF40330
+
+#define mmTPC5_RTR_SPLIT_COEF_0 0xF40400
+
+#define mmTPC5_RTR_SPLIT_COEF_1 0xF40404
+
+#define mmTPC5_RTR_SPLIT_COEF_2 0xF40408
+
+#define mmTPC5_RTR_SPLIT_COEF_3 0xF4040C
+
+#define mmTPC5_RTR_SPLIT_COEF_4 0xF40410
+
+#define mmTPC5_RTR_SPLIT_COEF_5 0xF40414
+
+#define mmTPC5_RTR_SPLIT_COEF_6 0xF40418
+
+#define mmTPC5_RTR_SPLIT_COEF_7 0xF4041C
+
+#define mmTPC5_RTR_SPLIT_COEF_8 0xF40420
+
+#define mmTPC5_RTR_SPLIT_COEF_9 0xF40424
+
+#define mmTPC5_RTR_SPLIT_CFG 0xF40440
+
+#define mmTPC5_RTR_SPLIT_RD_SAT 0xF40444
+
+#define mmTPC5_RTR_SPLIT_RD_RST_TOKEN 0xF40448
+
+#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_0 0xF4044C
+
+#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_1 0xF40450
+
+#define mmTPC5_RTR_SPLIT_WR_SAT 0xF40454
+
+#define mmTPC5_RTR_WPLIT_WR_TST_TOLEN 0xF40458
+
+#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_0 0xF4045C
+
+#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_1 0xF40460
+
+#define mmTPC5_RTR_HBW_RANGE_HIT 0xF40470
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_0 0xF40480
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_1 0xF40484
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_2 0xF40488
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_3 0xF4048C
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_4 0xF40490
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_5 0xF40494
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_6 0xF40498
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_7 0xF4049C
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_0 0xF404A0
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_1 0xF404A4
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_2 0xF404A8
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_3 0xF404AC
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_4 0xF404B0
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_5 0xF404B4
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_6 0xF404B8
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_7 0xF404BC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_0 0xF404C0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_1 0xF404C4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_2 0xF404C8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_3 0xF404CC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_4 0xF404D0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_5 0xF404D4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_6 0xF404D8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_7 0xF404DC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_0 0xF404E0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_1 0xF404E4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_2 0xF404E8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_3 0xF404EC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_4 0xF404F0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_5 0xF404F4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_6 0xF404F8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_7 0xF404FC
+
+#define mmTPC5_RTR_LBW_RANGE_HIT 0xF40500
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_0 0xF40510
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_1 0xF40514
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_2 0xF40518
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_3 0xF4051C
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_4 0xF40520
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_5 0xF40524
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_6 0xF40528
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_7 0xF4052C
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_8 0xF40530
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_9 0xF40534
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_10 0xF40538
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_11 0xF4053C
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_12 0xF40540
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_13 0xF40544
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_14 0xF40548
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_15 0xF4054C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_0 0xF40550
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_1 0xF40554
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_2 0xF40558
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_3 0xF4055C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_4 0xF40560
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_5 0xF40564
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_6 0xF40568
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_7 0xF4056C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_8 0xF40570
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_9 0xF40574
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_10 0xF40578
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_11 0xF4057C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_12 0xF40580
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_13 0xF40584
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_14 0xF40588
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_15 0xF4058C
+
+#define mmTPC5_RTR_RGLTR 0xF40590
+
+#define mmTPC5_RTR_RGLTR_WR_RESULT 0xF40594
+
+#define mmTPC5_RTR_RGLTR_RD_RESULT 0xF40598
+
+#define mmTPC5_RTR_SCRAMB_EN 0xF40600
+
+#define mmTPC5_RTR_NON_LIN_SCRAMB 0xF40604
+
+#endif /* ASIC_REG_TPC5_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
new file mode 100644
index 000000000000..1e1168601c41
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_CFG_REGS_H_
+#define ASIC_REG_TPC6_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF86418
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF8641C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF86420
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF86424
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86428
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF8642C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF86430
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86434
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF86438
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF8643C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86440
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86444
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF86448
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF8644C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF86450
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86454
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86458
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF8645C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF86460
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF86464
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86468
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF8646C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF86470
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86474
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF86478
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF8647C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86480
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86484
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF86488
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF8648C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF86490
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF86494
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86498
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF8649C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF864A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF864A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF864A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF864AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF864B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF864B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF864B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF864BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF864C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF864C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF864C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF864CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF864D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF864D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF864E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF864FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF86500
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF86504
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF86508
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF8650C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF86510
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF86514
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF86518
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF8651C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF86520
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF86524
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF86528
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF8652C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF86530
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF86534
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF86538
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF8653C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF86540
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF86544
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF86548
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF8654C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF86550
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF86554
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86558
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF8655C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF86560
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86564
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF86568
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF8656C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86570
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86574
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF86578
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF8657C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF86580
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86584
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86588
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF8658C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF86590
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF86594
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86598
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF8659C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF865A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF865A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF865A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF865AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF865B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF865B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF865B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF865BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF865C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF865C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF865C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF865CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF865D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF865D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF865D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF865DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF865E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF865E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF865E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF865EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF865F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF865F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF865F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF865FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF86600
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF86604
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86608
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF8660C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF86610
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86614
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF86618
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF8661C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86620
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86624
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF86628
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF8662C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF86630
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF86634
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF86638
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF8663C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF86640
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF86644
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF86648
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF8664C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF86650
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF86654
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF86658
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF8665C
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86660
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF86664
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86668
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF8666C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86670
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF86674
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF86678
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF8667C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF86680
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF86684
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF86688
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF8668C
+
+#define mmTPC6_CFG_KERNEL_SRF_0 0xF86690
+
+#define mmTPC6_CFG_KERNEL_SRF_1 0xF86694
+
+#define mmTPC6_CFG_KERNEL_SRF_2 0xF86698
+
+#define mmTPC6_CFG_KERNEL_SRF_3 0xF8669C
+
+#define mmTPC6_CFG_KERNEL_SRF_4 0xF866A0
+
+#define mmTPC6_CFG_KERNEL_SRF_5 0xF866A4
+
+#define mmTPC6_CFG_KERNEL_SRF_6 0xF866A8
+
+#define mmTPC6_CFG_KERNEL_SRF_7 0xF866AC
+
+#define mmTPC6_CFG_KERNEL_SRF_8 0xF866B0
+
+#define mmTPC6_CFG_KERNEL_SRF_9 0xF866B4
+
+#define mmTPC6_CFG_KERNEL_SRF_10 0xF866B8
+
+#define mmTPC6_CFG_KERNEL_SRF_11 0xF866BC
+
+#define mmTPC6_CFG_KERNEL_SRF_12 0xF866C0
+
+#define mmTPC6_CFG_KERNEL_SRF_13 0xF866C4
+
+#define mmTPC6_CFG_KERNEL_SRF_14 0xF866C8
+
+#define mmTPC6_CFG_KERNEL_SRF_15 0xF866CC
+
+#define mmTPC6_CFG_KERNEL_SRF_16 0xF866D0
+
+#define mmTPC6_CFG_KERNEL_SRF_17 0xF866D4
+
+#define mmTPC6_CFG_KERNEL_SRF_18 0xF866D8
+
+#define mmTPC6_CFG_KERNEL_SRF_19 0xF866DC
+
+#define mmTPC6_CFG_KERNEL_SRF_20 0xF866E0
+
+#define mmTPC6_CFG_KERNEL_SRF_21 0xF866E4
+
+#define mmTPC6_CFG_KERNEL_SRF_22 0xF866E8
+
+#define mmTPC6_CFG_KERNEL_SRF_23 0xF866EC
+
+#define mmTPC6_CFG_KERNEL_SRF_24 0xF866F0
+
+#define mmTPC6_CFG_KERNEL_SRF_25 0xF866F4
+
+#define mmTPC6_CFG_KERNEL_SRF_26 0xF866F8
+
+#define mmTPC6_CFG_KERNEL_SRF_27 0xF866FC
+
+#define mmTPC6_CFG_KERNEL_SRF_28 0xF86700
+
+#define mmTPC6_CFG_KERNEL_SRF_29 0xF86704
+
+#define mmTPC6_CFG_KERNEL_SRF_30 0xF86708
+
+#define mmTPC6_CFG_KERNEL_SRF_31 0xF8670C
+
+#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF86710
+
+#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86714
+
+#define mmTPC6_CFG_RESERVED_DESC_END 0xF86738
+
+#define mmTPC6_CFG_ROUND_CSR 0xF867FC
+
+#define mmTPC6_CFG_TBUF_BASE_ADDR_LOW 0xF86800
+
+#define mmTPC6_CFG_TBUF_BASE_ADDR_HIGH 0xF86804
+
+#define mmTPC6_CFG_SEMAPHORE 0xF86808
+
+#define mmTPC6_CFG_VFLAGS 0xF8680C
+
+#define mmTPC6_CFG_SFLAGS 0xF86810
+
+#define mmTPC6_CFG_LFSR_POLYNOM 0xF86818
+
+#define mmTPC6_CFG_STATUS 0xF8681C
+
+#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86820
+
+#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86824
+
+#define mmTPC6_CFG_SM_BASE_ADDRESS_LOW 0xF86828
+
+#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8682C
+
+#define mmTPC6_CFG_TPC_CMD 0xF86830
+
+#define mmTPC6_CFG_TPC_EXECUTE 0xF86838
+
+#define mmTPC6_CFG_TPC_STALL 0xF8683C
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86840
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86844
+
+#define mmTPC6_CFG_MSS_CONFIG 0xF86854
+
+#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86858
+
+#define mmTPC6_CFG_TPC_INTR_MASK 0xF8685C
+
+#define mmTPC6_CFG_TSB_CONFIG 0xF86860
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04
+
+#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08
+
+#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF86A18
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A1C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A20
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF86A24
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A28
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A2C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF86A30
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A34
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A38
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF86A3C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A40
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A44
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF86A48
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A4C
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A50
+
+#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A54
+
+#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A58
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A5C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A60
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF86A64
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A68
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A6C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF86A70
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A74
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A78
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF86A7C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A80
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A84
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF86A88
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A8C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A90
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF86A94
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A98
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A9C
+
+#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86AA0
+
+#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86AA4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86AA8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86AAC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF86AB0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86AB4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86AB8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF86ABC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86AC0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86AC4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF86AC8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86ACC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86AD0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF86AD4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AD8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86ADC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF86AE0
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AE4
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF86AE8
+
+#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AEC
+
+#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AF0
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AF4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86AF8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF86AFC
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86B00
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86B04
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF86B08
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86B0C
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86B10
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF86B14
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86B18
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86B1C
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF86B20
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86B24
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86B28
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF86B2C
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86B30
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86B34
+
+#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86B38
+
+#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86B3C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86B40
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86B44
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF86B48
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86B4C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86B50
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF86B54
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B58
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B5C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF86B60
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B64
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B68
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF86B6C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B70
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B74
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF86B78
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B7C
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B80
+
+#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B84
+
+#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B88
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B8C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B90
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF86B94
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B98
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B9C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF86BA0
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86BA4
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86BA8
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF86BAC
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86BB0
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86BB4
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF86BB8
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86BBC
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86BC0
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF86BC4
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86BC8
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86BCC
+
+#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86BD0
+
+#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86BD4
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86BD8
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86BDC
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF86BE0
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86BE4
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86BE8
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF86BEC
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86BF0
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86BF4
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF86BF8
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86BFC
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86C00
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF86C04
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86C08
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86C0C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF86C10
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86C14
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86C18
+
+#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86C1C
+
+#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86C20
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86C24
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86C28
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF86C2C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86C30
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86C34
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF86C38
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86C3C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86C40
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF86C44
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86C48
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86C4C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF86C50
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86C54
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86C58
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF86C5C
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86C60
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86C64
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86C68
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86C6C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86C70
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86C74
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86C78
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 0xF86C7C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86C80
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86C84
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86C88
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86C8C
+
+#define mmTPC6_CFG_QM_SRF_0 0xF86C90
+
+#define mmTPC6_CFG_QM_SRF_1 0xF86C94
+
+#define mmTPC6_CFG_QM_SRF_2 0xF86C98
+
+#define mmTPC6_CFG_QM_SRF_3 0xF86C9C
+
+#define mmTPC6_CFG_QM_SRF_4 0xF86CA0
+
+#define mmTPC6_CFG_QM_SRF_5 0xF86CA4
+
+#define mmTPC6_CFG_QM_SRF_6 0xF86CA8
+
+#define mmTPC6_CFG_QM_SRF_7 0xF86CAC
+
+#define mmTPC6_CFG_QM_SRF_8 0xF86CB0
+
+#define mmTPC6_CFG_QM_SRF_9 0xF86CB4
+
+#define mmTPC6_CFG_QM_SRF_10 0xF86CB8
+
+#define mmTPC6_CFG_QM_SRF_11 0xF86CBC
+
+#define mmTPC6_CFG_QM_SRF_12 0xF86CC0
+
+#define mmTPC6_CFG_QM_SRF_13 0xF86CC4
+
+#define mmTPC6_CFG_QM_SRF_14 0xF86CC8
+
+#define mmTPC6_CFG_QM_SRF_15 0xF86CCC
+
+#define mmTPC6_CFG_QM_SRF_16 0xF86CD0
+
+#define mmTPC6_CFG_QM_SRF_17 0xF86CD4
+
+#define mmTPC6_CFG_QM_SRF_18 0xF86CD8
+
+#define mmTPC6_CFG_QM_SRF_19 0xF86CDC
+
+#define mmTPC6_CFG_QM_SRF_20 0xF86CE0
+
+#define mmTPC6_CFG_QM_SRF_21 0xF86CE4
+
+#define mmTPC6_CFG_QM_SRF_22 0xF86CE8
+
+#define mmTPC6_CFG_QM_SRF_23 0xF86CEC
+
+#define mmTPC6_CFG_QM_SRF_24 0xF86CF0
+
+#define mmTPC6_CFG_QM_SRF_25 0xF86CF4
+
+#define mmTPC6_CFG_QM_SRF_26 0xF86CF8
+
+#define mmTPC6_CFG_QM_SRF_27 0xF86CFC
+
+#define mmTPC6_CFG_QM_SRF_28 0xF86D00
+
+#define mmTPC6_CFG_QM_SRF_29 0xF86D04
+
+#define mmTPC6_CFG_QM_SRF_30 0xF86D08
+
+#define mmTPC6_CFG_QM_SRF_31 0xF86D0C
+
+#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86D10
+
+#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D14
+
+#define mmTPC6_CFG_ARUSER 0xF86D18
+
+#define mmTPC6_CFG_AWUSER 0xF86D1C
+
+#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF86E00
+
+#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF86E04
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF86E08
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF86E0C
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF86E10
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF86E14
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF86E18
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF86E1C
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF86E20
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF86E24
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF86E28
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF86E2C
+
+#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
new file mode 100644
index 000000000000..fbca6b47284e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_CMDQ_REGS_H_
+#define ASIC_REG_TPC6_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC6_CMDQ_GLBL_CFG0 0xF89000
+
+#define mmTPC6_CMDQ_GLBL_CFG1 0xF89004
+
+#define mmTPC6_CMDQ_GLBL_PROT 0xF89008
+
+#define mmTPC6_CMDQ_GLBL_ERR_CFG 0xF8900C
+
+#define mmTPC6_CMDQ_GLBL_ERR_ADDR_LO 0xF89010
+
+#define mmTPC6_CMDQ_GLBL_ERR_ADDR_HI 0xF89014
+
+#define mmTPC6_CMDQ_GLBL_ERR_WDATA 0xF89018
+
+#define mmTPC6_CMDQ_GLBL_SECURE_PROPS 0xF8901C
+
+#define mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS 0xF89020
+
+#define mmTPC6_CMDQ_GLBL_STS0 0xF89024
+
+#define mmTPC6_CMDQ_GLBL_STS1 0xF89028
+
+#define mmTPC6_CMDQ_CQ_CFG0 0xF890B0
+
+#define mmTPC6_CMDQ_CQ_CFG1 0xF890B4
+
+#define mmTPC6_CMDQ_CQ_ARUSER 0xF890B8
+
+#define mmTPC6_CMDQ_CQ_PTR_LO 0xF890C0
+
+#define mmTPC6_CMDQ_CQ_PTR_HI 0xF890C4
+
+#define mmTPC6_CMDQ_CQ_TSIZE 0xF890C8
+
+#define mmTPC6_CMDQ_CQ_CTL 0xF890CC
+
+#define mmTPC6_CMDQ_CQ_PTR_LO_STS 0xF890D4
+
+#define mmTPC6_CMDQ_CQ_PTR_HI_STS 0xF890D8
+
+#define mmTPC6_CMDQ_CQ_TSIZE_STS 0xF890DC
+
+#define mmTPC6_CMDQ_CQ_CTL_STS 0xF890E0
+
+#define mmTPC6_CMDQ_CQ_STS0 0xF890E4
+
+#define mmTPC6_CMDQ_CQ_STS1 0xF890E8
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN 0xF890F0
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF890F4
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT 0xF890F8
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF890FC
+
+#define mmTPC6_CMDQ_CQ_IFIFO_CNT 0xF89108
+
+#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF89120
+
+#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF89124
+
+#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF89128
+
+#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF8912C
+
+#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF89130
+
+#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF89134
+
+#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF89138
+
+#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF8913C
+
+#define mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF89140
+
+#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF89144
+
+#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF89148
+
+#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF8914C
+
+#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF89150
+
+#define mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF89154
+
+#define mmTPC6_CMDQ_CP_FENCE0_RDATA 0xF89158
+
+#define mmTPC6_CMDQ_CP_FENCE1_RDATA 0xF8915C
+
+#define mmTPC6_CMDQ_CP_FENCE2_RDATA 0xF89160
+
+#define mmTPC6_CMDQ_CP_FENCE3_RDATA 0xF89164
+
+#define mmTPC6_CMDQ_CP_FENCE0_CNT 0xF89168
+
+#define mmTPC6_CMDQ_CP_FENCE1_CNT 0xF8916C
+
+#define mmTPC6_CMDQ_CP_FENCE2_CNT 0xF89170
+
+#define mmTPC6_CMDQ_CP_FENCE3_CNT 0xF89174
+
+#define mmTPC6_CMDQ_CP_STS 0xF89178
+
+#define mmTPC6_CMDQ_CP_CURRENT_INST_LO 0xF8917C
+
+#define mmTPC6_CMDQ_CP_CURRENT_INST_HI 0xF89180
+
+#define mmTPC6_CMDQ_CP_BARRIER_CFG 0xF89184
+
+#define mmTPC6_CMDQ_CP_DBG_0 0xF89188
+
+#define mmTPC6_CMDQ_CQ_BUF_ADDR 0xF89308
+
+#define mmTPC6_CMDQ_CQ_BUF_RDATA 0xF8930C
+
+#endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
new file mode 100644
index 000000000000..bf32465dabcb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_QM_REGS_H_
+#define ASIC_REG_TPC6_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC6_QM_GLBL_CFG0 0xF88000
+
+#define mmTPC6_QM_GLBL_CFG1 0xF88004
+
+#define mmTPC6_QM_GLBL_PROT 0xF88008
+
+#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88010
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88014
+
+#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88018
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS 0xF8801C
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS 0xF88020
+
+#define mmTPC6_QM_GLBL_STS0 0xF88024
+
+#define mmTPC6_QM_GLBL_STS1 0xF88028
+
+#define mmTPC6_QM_PQ_BASE_LO 0xF88060
+
+#define mmTPC6_QM_PQ_BASE_HI 0xF88064
+
+#define mmTPC6_QM_PQ_SIZE 0xF88068
+
+#define mmTPC6_QM_PQ_PI 0xF8806C
+
+#define mmTPC6_QM_PQ_CI 0xF88070
+
+#define mmTPC6_QM_PQ_CFG0 0xF88074
+
+#define mmTPC6_QM_PQ_CFG1 0xF88078
+
+#define mmTPC6_QM_PQ_ARUSER 0xF8807C
+
+#define mmTPC6_QM_PQ_PUSH0 0xF88080
+
+#define mmTPC6_QM_PQ_PUSH1 0xF88084
+
+#define mmTPC6_QM_PQ_PUSH2 0xF88088
+
+#define mmTPC6_QM_PQ_PUSH3 0xF8808C
+
+#define mmTPC6_QM_PQ_STS0 0xF88090
+
+#define mmTPC6_QM_PQ_STS1 0xF88094
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_EN 0xF880A0
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF880A4
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_SAT 0xF880A8
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_TOUT 0xF880AC
+
+#define mmTPC6_QM_CQ_CFG0 0xF880B0
+
+#define mmTPC6_QM_CQ_CFG1 0xF880B4
+
+#define mmTPC6_QM_CQ_ARUSER 0xF880B8
+
+#define mmTPC6_QM_CQ_PTR_LO 0xF880C0
+
+#define mmTPC6_QM_CQ_PTR_HI 0xF880C4
+
+#define mmTPC6_QM_CQ_TSIZE 0xF880C8
+
+#define mmTPC6_QM_CQ_CTL 0xF880CC
+
+#define mmTPC6_QM_CQ_PTR_LO_STS 0xF880D4
+
+#define mmTPC6_QM_CQ_PTR_HI_STS 0xF880D8
+
+#define mmTPC6_QM_CQ_TSIZE_STS 0xF880DC
+
+#define mmTPC6_QM_CQ_CTL_STS 0xF880E0
+
+#define mmTPC6_QM_CQ_STS0 0xF880E4
+
+#define mmTPC6_QM_CQ_STS1 0xF880E8
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_EN 0xF880F0
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF880F4
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_SAT 0xF880F8
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_TOUT 0xF880FC
+
+#define mmTPC6_QM_CQ_IFIFO_CNT 0xF88108
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO 0xF88120
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI 0xF88124
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO 0xF88128
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI 0xF8812C
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO 0xF88130
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI 0xF88134
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO 0xF88138
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI 0xF8813C
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET 0xF88140
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF88144
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF88148
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF8814C
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF88150
+
+#define mmTPC6_QM_CP_LDMA_COMMIT_OFFSET 0xF88154
+
+#define mmTPC6_QM_CP_FENCE0_RDATA 0xF88158
+
+#define mmTPC6_QM_CP_FENCE1_RDATA 0xF8815C
+
+#define mmTPC6_QM_CP_FENCE2_RDATA 0xF88160
+
+#define mmTPC6_QM_CP_FENCE3_RDATA 0xF88164
+
+#define mmTPC6_QM_CP_FENCE0_CNT 0xF88168
+
+#define mmTPC6_QM_CP_FENCE1_CNT 0xF8816C
+
+#define mmTPC6_QM_CP_FENCE2_CNT 0xF88170
+
+#define mmTPC6_QM_CP_FENCE3_CNT 0xF88174
+
+#define mmTPC6_QM_CP_STS 0xF88178
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO 0xF8817C
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI 0xF88180
+
+#define mmTPC6_QM_CP_BARRIER_CFG 0xF88184
+
+#define mmTPC6_QM_CP_DBG_0 0xF88188
+
+#define mmTPC6_QM_PQ_BUF_ADDR 0xF88300
+
+#define mmTPC6_QM_PQ_BUF_RDATA 0xF88304
+
+#define mmTPC6_QM_CQ_BUF_ADDR 0xF88308
+
+#define mmTPC6_QM_CQ_BUF_RDATA 0xF8830C
+
+#endif /* ASIC_REG_TPC6_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
new file mode 100644
index 000000000000..609bb90e1046
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_RTR_REGS_H_
+#define ASIC_REG_TPC6_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC6_RTR_HBW_RD_RQ_E_ARB 0xF80100
+
+#define mmTPC6_RTR_HBW_RD_RQ_W_ARB 0xF80104
+
+#define mmTPC6_RTR_HBW_RD_RQ_N_ARB 0xF80108
+
+#define mmTPC6_RTR_HBW_RD_RQ_S_ARB 0xF8010C
+
+#define mmTPC6_RTR_HBW_RD_RQ_L_ARB 0xF80110
+
+#define mmTPC6_RTR_HBW_E_ARB_MAX 0xF80120
+
+#define mmTPC6_RTR_HBW_W_ARB_MAX 0xF80124
+
+#define mmTPC6_RTR_HBW_N_ARB_MAX 0xF80128
+
+#define mmTPC6_RTR_HBW_S_ARB_MAX 0xF8012C
+
+#define mmTPC6_RTR_HBW_L_ARB_MAX 0xF80130
+
+#define mmTPC6_RTR_HBW_RD_RS_E_ARB 0xF80140
+
+#define mmTPC6_RTR_HBW_RD_RS_W_ARB 0xF80144
+
+#define mmTPC6_RTR_HBW_RD_RS_N_ARB 0xF80148
+
+#define mmTPC6_RTR_HBW_RD_RS_S_ARB 0xF8014C
+
+#define mmTPC6_RTR_HBW_RD_RS_L_ARB 0xF80150
+
+#define mmTPC6_RTR_HBW_WR_RQ_E_ARB 0xF80170
+
+#define mmTPC6_RTR_HBW_WR_RQ_W_ARB 0xF80174
+
+#define mmTPC6_RTR_HBW_WR_RQ_N_ARB 0xF80178
+
+#define mmTPC6_RTR_HBW_WR_RQ_S_ARB 0xF8017C
+
+#define mmTPC6_RTR_HBW_WR_RQ_L_ARB 0xF80180
+
+#define mmTPC6_RTR_HBW_WR_RS_E_ARB 0xF80190
+
+#define mmTPC6_RTR_HBW_WR_RS_W_ARB 0xF80194
+
+#define mmTPC6_RTR_HBW_WR_RS_N_ARB 0xF80198
+
+#define mmTPC6_RTR_HBW_WR_RS_S_ARB 0xF8019C
+
+#define mmTPC6_RTR_HBW_WR_RS_L_ARB 0xF801A0
+
+#define mmTPC6_RTR_LBW_RD_RQ_E_ARB 0xF80200
+
+#define mmTPC6_RTR_LBW_RD_RQ_W_ARB 0xF80204
+
+#define mmTPC6_RTR_LBW_RD_RQ_N_ARB 0xF80208
+
+#define mmTPC6_RTR_LBW_RD_RQ_S_ARB 0xF8020C
+
+#define mmTPC6_RTR_LBW_RD_RQ_L_ARB 0xF80210
+
+#define mmTPC6_RTR_LBW_E_ARB_MAX 0xF80220
+
+#define mmTPC6_RTR_LBW_W_ARB_MAX 0xF80224
+
+#define mmTPC6_RTR_LBW_N_ARB_MAX 0xF80228
+
+#define mmTPC6_RTR_LBW_S_ARB_MAX 0xF8022C
+
+#define mmTPC6_RTR_LBW_L_ARB_MAX 0xF80230
+
+#define mmTPC6_RTR_LBW_RD_RS_E_ARB 0xF80250
+
+#define mmTPC6_RTR_LBW_RD_RS_W_ARB 0xF80254
+
+#define mmTPC6_RTR_LBW_RD_RS_N_ARB 0xF80258
+
+#define mmTPC6_RTR_LBW_RD_RS_S_ARB 0xF8025C
+
+#define mmTPC6_RTR_LBW_RD_RS_L_ARB 0xF80260
+
+#define mmTPC6_RTR_LBW_WR_RQ_E_ARB 0xF80270
+
+#define mmTPC6_RTR_LBW_WR_RQ_W_ARB 0xF80274
+
+#define mmTPC6_RTR_LBW_WR_RQ_N_ARB 0xF80278
+
+#define mmTPC6_RTR_LBW_WR_RQ_S_ARB 0xF8027C
+
+#define mmTPC6_RTR_LBW_WR_RQ_L_ARB 0xF80280
+
+#define mmTPC6_RTR_LBW_WR_RS_E_ARB 0xF80290
+
+#define mmTPC6_RTR_LBW_WR_RS_W_ARB 0xF80294
+
+#define mmTPC6_RTR_LBW_WR_RS_N_ARB 0xF80298
+
+#define mmTPC6_RTR_LBW_WR_RS_S_ARB 0xF8029C
+
+#define mmTPC6_RTR_LBW_WR_RS_L_ARB 0xF802A0
+
+#define mmTPC6_RTR_DBG_E_ARB 0xF80300
+
+#define mmTPC6_RTR_DBG_W_ARB 0xF80304
+
+#define mmTPC6_RTR_DBG_N_ARB 0xF80308
+
+#define mmTPC6_RTR_DBG_S_ARB 0xF8030C
+
+#define mmTPC6_RTR_DBG_L_ARB 0xF80310
+
+#define mmTPC6_RTR_DBG_E_ARB_MAX 0xF80320
+
+#define mmTPC6_RTR_DBG_W_ARB_MAX 0xF80324
+
+#define mmTPC6_RTR_DBG_N_ARB_MAX 0xF80328
+
+#define mmTPC6_RTR_DBG_S_ARB_MAX 0xF8032C
+
+#define mmTPC6_RTR_DBG_L_ARB_MAX 0xF80330
+
+#define mmTPC6_RTR_SPLIT_COEF_0 0xF80400
+
+#define mmTPC6_RTR_SPLIT_COEF_1 0xF80404
+
+#define mmTPC6_RTR_SPLIT_COEF_2 0xF80408
+
+#define mmTPC6_RTR_SPLIT_COEF_3 0xF8040C
+
+#define mmTPC6_RTR_SPLIT_COEF_4 0xF80410
+
+#define mmTPC6_RTR_SPLIT_COEF_5 0xF80414
+
+#define mmTPC6_RTR_SPLIT_COEF_6 0xF80418
+
+#define mmTPC6_RTR_SPLIT_COEF_7 0xF8041C
+
+#define mmTPC6_RTR_SPLIT_COEF_8 0xF80420
+
+#define mmTPC6_RTR_SPLIT_COEF_9 0xF80424
+
+#define mmTPC6_RTR_SPLIT_CFG 0xF80440
+
+#define mmTPC6_RTR_SPLIT_RD_SAT 0xF80444
+
+#define mmTPC6_RTR_SPLIT_RD_RST_TOKEN 0xF80448
+
+#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_0 0xF8044C
+
+#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_1 0xF80450
+
+#define mmTPC6_RTR_SPLIT_WR_SAT 0xF80454
+
+#define mmTPC6_RTR_WPLIT_WR_TST_TOLEN 0xF80458
+
+#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_0 0xF8045C
+
+#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_1 0xF80460
+
+#define mmTPC6_RTR_HBW_RANGE_HIT 0xF80470
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_0 0xF80480
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_1 0xF80484
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_2 0xF80488
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_3 0xF8048C
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_4 0xF80490
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_5 0xF80494
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_6 0xF80498
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_7 0xF8049C
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_0 0xF804A0
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_1 0xF804A4
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_2 0xF804A8
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_3 0xF804AC
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_4 0xF804B0
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_5 0xF804B4
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_6 0xF804B8
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_7 0xF804BC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_0 0xF804C0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_1 0xF804C4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_2 0xF804C8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_3 0xF804CC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_4 0xF804D0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_5 0xF804D4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_6 0xF804D8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_7 0xF804DC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_0 0xF804E0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_1 0xF804E4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_2 0xF804E8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_3 0xF804EC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_4 0xF804F0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_5 0xF804F4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_6 0xF804F8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_7 0xF804FC
+
+#define mmTPC6_RTR_LBW_RANGE_HIT 0xF80500
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_0 0xF80510
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_1 0xF80514
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_2 0xF80518
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_3 0xF8051C
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_4 0xF80520
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_5 0xF80524
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_6 0xF80528
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_7 0xF8052C
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_8 0xF80530
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_9 0xF80534
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_10 0xF80538
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_11 0xF8053C
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_12 0xF80540
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_13 0xF80544
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_14 0xF80548
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_15 0xF8054C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_0 0xF80550
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_1 0xF80554
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_2 0xF80558
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_3 0xF8055C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_4 0xF80560
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_5 0xF80564
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_6 0xF80568
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_7 0xF8056C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_8 0xF80570
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_9 0xF80574
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_10 0xF80578
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_11 0xF8057C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_12 0xF80580
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_13 0xF80584
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_14 0xF80588
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_15 0xF8058C
+
+#define mmTPC6_RTR_RGLTR 0xF80590
+
+#define mmTPC6_RTR_RGLTR_WR_RESULT 0xF80594
+
+#define mmTPC6_RTR_RGLTR_RD_RESULT 0xF80598
+
+#define mmTPC6_RTR_SCRAMB_EN 0xF80600
+
+#define mmTPC6_RTR_NON_LIN_SCRAMB 0xF80604
+
+#endif /* ASIC_REG_TPC6_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
new file mode 100644
index 000000000000..bf2fd0f73906
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_CFG_REGS_H_
+#define ASIC_REG_TPC7_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6418
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC641C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC6420
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6424
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6428
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC642C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6430
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6434
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC6438
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xFC643C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6440
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6444
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6448
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC644C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC6450
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6454
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6458
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC645C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC6460
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6464
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6468
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC646C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6470
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6474
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC6478
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xFC647C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6480
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6484
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6488
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC648C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC6490
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6494
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6498
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC649C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC64A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC64A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC64A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC64AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xFC64B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC64B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC64B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xFC64BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC64C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC64C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xFC64C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC64CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC64D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xFC64D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xFC64E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xFC64FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC6500
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC6504
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6508
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC650C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC6510
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6514
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC6518
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC651C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6520
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC6524
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC6528
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xFC652C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC6530
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC6534
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC6538
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC653C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC6540
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC6544
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6548
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC654C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC6550
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6554
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6558
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC655C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6560
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6564
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC6568
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xFC656C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6570
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6574
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6578
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC657C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC6580
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6584
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6588
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC658C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC6590
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6594
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6598
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC659C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xFC65A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC65A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC65A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xFC65AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC65B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC65B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xFC65B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC65BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC65C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xFC65C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC65C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC65CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC65D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC65D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC65D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC65DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xFC65E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC65E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC65E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xFC65EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC65F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC65F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xFC65F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC65FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC6600
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6604
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6608
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC660C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6610
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6614
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC6618
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC661C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6620
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6624
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC6628
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xFC662C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC6630
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC6634
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6638
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC663C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC6640
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6644
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC6648
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC664C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6650
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC6654
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC6658
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xFC665C
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6660
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC6664
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6668
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC666C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6670
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC6674
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC6678
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC667C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC6680
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC6684
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC6688
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC668C
+
+#define mmTPC7_CFG_KERNEL_SRF_0 0xFC6690
+
+#define mmTPC7_CFG_KERNEL_SRF_1 0xFC6694
+
+#define mmTPC7_CFG_KERNEL_SRF_2 0xFC6698
+
+#define mmTPC7_CFG_KERNEL_SRF_3 0xFC669C
+
+#define mmTPC7_CFG_KERNEL_SRF_4 0xFC66A0
+
+#define mmTPC7_CFG_KERNEL_SRF_5 0xFC66A4
+
+#define mmTPC7_CFG_KERNEL_SRF_6 0xFC66A8
+
+#define mmTPC7_CFG_KERNEL_SRF_7 0xFC66AC
+
+#define mmTPC7_CFG_KERNEL_SRF_8 0xFC66B0
+
+#define mmTPC7_CFG_KERNEL_SRF_9 0xFC66B4
+
+#define mmTPC7_CFG_KERNEL_SRF_10 0xFC66B8
+
+#define mmTPC7_CFG_KERNEL_SRF_11 0xFC66BC
+
+#define mmTPC7_CFG_KERNEL_SRF_12 0xFC66C0
+
+#define mmTPC7_CFG_KERNEL_SRF_13 0xFC66C4
+
+#define mmTPC7_CFG_KERNEL_SRF_14 0xFC66C8
+
+#define mmTPC7_CFG_KERNEL_SRF_15 0xFC66CC
+
+#define mmTPC7_CFG_KERNEL_SRF_16 0xFC66D0
+
+#define mmTPC7_CFG_KERNEL_SRF_17 0xFC66D4
+
+#define mmTPC7_CFG_KERNEL_SRF_18 0xFC66D8
+
+#define mmTPC7_CFG_KERNEL_SRF_19 0xFC66DC
+
+#define mmTPC7_CFG_KERNEL_SRF_20 0xFC66E0
+
+#define mmTPC7_CFG_KERNEL_SRF_21 0xFC66E4
+
+#define mmTPC7_CFG_KERNEL_SRF_22 0xFC66E8
+
+#define mmTPC7_CFG_KERNEL_SRF_23 0xFC66EC
+
+#define mmTPC7_CFG_KERNEL_SRF_24 0xFC66F0
+
+#define mmTPC7_CFG_KERNEL_SRF_25 0xFC66F4
+
+#define mmTPC7_CFG_KERNEL_SRF_26 0xFC66F8
+
+#define mmTPC7_CFG_KERNEL_SRF_27 0xFC66FC
+
+#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6700
+
+#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6704
+
+#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6708
+
+#define mmTPC7_CFG_KERNEL_SRF_31 0xFC670C
+
+#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC6710
+
+#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6714
+
+#define mmTPC7_CFG_RESERVED_DESC_END 0xFC6738
+
+#define mmTPC7_CFG_ROUND_CSR 0xFC67FC
+
+#define mmTPC7_CFG_TBUF_BASE_ADDR_LOW 0xFC6800
+
+#define mmTPC7_CFG_TBUF_BASE_ADDR_HIGH 0xFC6804
+
+#define mmTPC7_CFG_SEMAPHORE 0xFC6808
+
+#define mmTPC7_CFG_VFLAGS 0xFC680C
+
+#define mmTPC7_CFG_SFLAGS 0xFC6810
+
+#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6818
+
+#define mmTPC7_CFG_STATUS 0xFC681C
+
+#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6820
+
+#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6824
+
+#define mmTPC7_CFG_SM_BASE_ADDRESS_LOW 0xFC6828
+
+#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC682C
+
+#define mmTPC7_CFG_TPC_CMD 0xFC6830
+
+#define mmTPC7_CFG_TPC_EXECUTE 0xFC6838
+
+#define mmTPC7_CFG_TPC_STALL 0xFC683C
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6840
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6844
+
+#define mmTPC7_CFG_MSS_CONFIG 0xFC6854
+
+#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6858
+
+#define mmTPC7_CFG_TPC_INTR_MASK 0xFC685C
+
+#define mmTPC7_CFG_TSB_CONFIG 0xFC6860
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04
+
+#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08
+
+#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6A18
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A1C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A20
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6A24
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A28
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A2C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6A30
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A34
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A38
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xFC6A3C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A40
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A44
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6A48
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A4C
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A50
+
+#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A54
+
+#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A58
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A5C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A60
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6A64
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A68
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A6C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6A70
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A74
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A78
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xFC6A7C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A80
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A84
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6A88
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A8C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A90
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6A94
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A98
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A9C
+
+#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6AA0
+
+#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6AA4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6AA8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6AAC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xFC6AB0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6AB4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6AB8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xFC6ABC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6AC0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6AC4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xFC6AC8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6ACC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6AD0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xFC6AD4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AD8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6ADC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xFC6AE0
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AE4
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xFC6AE8
+
+#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AEC
+
+#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AF0
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AF4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6AF8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xFC6AFC
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6B00
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6B04
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6B08
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6B0C
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6B10
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6B14
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6B18
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6B1C
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6B20
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6B24
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6B28
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xFC6B2C
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6B30
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6B34
+
+#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6B38
+
+#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6B3C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6B40
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6B44
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6B48
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6B4C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6B50
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6B54
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B58
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B5C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6B60
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B64
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B68
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xFC6B6C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B70
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B74
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6B78
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B7C
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B80
+
+#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B84
+
+#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B88
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B8C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B90
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6B94
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B98
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B9C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xFC6BA0
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6BA4
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6BA8
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xFC6BAC
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6BB0
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6BB4
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xFC6BB8
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6BBC
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6BC0
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xFC6BC4
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6BC8
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6BCC
+
+#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6BD0
+
+#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6BD4
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6BD8
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6BDC
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xFC6BE0
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6BE4
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6BE8
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xFC6BEC
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6BF0
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6BF4
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xFC6BF8
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6BFC
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6C00
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6C04
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6C08
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6C0C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6C10
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6C14
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6C18
+
+#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6C1C
+
+#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6C20
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6C24
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6C28
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xFC6C2C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6C30
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6C34
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6C38
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6C3C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6C40
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6C44
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6C48
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6C4C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6C50
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6C54
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6C58
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xFC6C5C
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6C60
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6C64
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6C68
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6C6C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6C70
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6C74
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6C78
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 0xFC6C7C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6C80
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6C84
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6C88
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6C8C
+
+#define mmTPC7_CFG_QM_SRF_0 0xFC6C90
+
+#define mmTPC7_CFG_QM_SRF_1 0xFC6C94
+
+#define mmTPC7_CFG_QM_SRF_2 0xFC6C98
+
+#define mmTPC7_CFG_QM_SRF_3 0xFC6C9C
+
+#define mmTPC7_CFG_QM_SRF_4 0xFC6CA0
+
+#define mmTPC7_CFG_QM_SRF_5 0xFC6CA4
+
+#define mmTPC7_CFG_QM_SRF_6 0xFC6CA8
+
+#define mmTPC7_CFG_QM_SRF_7 0xFC6CAC
+
+#define mmTPC7_CFG_QM_SRF_8 0xFC6CB0
+
+#define mmTPC7_CFG_QM_SRF_9 0xFC6CB4
+
+#define mmTPC7_CFG_QM_SRF_10 0xFC6CB8
+
+#define mmTPC7_CFG_QM_SRF_11 0xFC6CBC
+
+#define mmTPC7_CFG_QM_SRF_12 0xFC6CC0
+
+#define mmTPC7_CFG_QM_SRF_13 0xFC6CC4
+
+#define mmTPC7_CFG_QM_SRF_14 0xFC6CC8
+
+#define mmTPC7_CFG_QM_SRF_15 0xFC6CCC
+
+#define mmTPC7_CFG_QM_SRF_16 0xFC6CD0
+
+#define mmTPC7_CFG_QM_SRF_17 0xFC6CD4
+
+#define mmTPC7_CFG_QM_SRF_18 0xFC6CD8
+
+#define mmTPC7_CFG_QM_SRF_19 0xFC6CDC
+
+#define mmTPC7_CFG_QM_SRF_20 0xFC6CE0
+
+#define mmTPC7_CFG_QM_SRF_21 0xFC6CE4
+
+#define mmTPC7_CFG_QM_SRF_22 0xFC6CE8
+
+#define mmTPC7_CFG_QM_SRF_23 0xFC6CEC
+
+#define mmTPC7_CFG_QM_SRF_24 0xFC6CF0
+
+#define mmTPC7_CFG_QM_SRF_25 0xFC6CF4
+
+#define mmTPC7_CFG_QM_SRF_26 0xFC6CF8
+
+#define mmTPC7_CFG_QM_SRF_27 0xFC6CFC
+
+#define mmTPC7_CFG_QM_SRF_28 0xFC6D00
+
+#define mmTPC7_CFG_QM_SRF_29 0xFC6D04
+
+#define mmTPC7_CFG_QM_SRF_30 0xFC6D08
+
+#define mmTPC7_CFG_QM_SRF_31 0xFC6D0C
+
+#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6D10
+
+#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D14
+
+#define mmTPC7_CFG_ARUSER 0xFC6D18
+
+#define mmTPC7_CFG_AWUSER 0xFC6D1C
+
+#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC6E00
+
+#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC6E04
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC6E08
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC6E0C
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC6E10
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC6E14
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC6E18
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC6E1C
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC6E20
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC6E24
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC6E28
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC6E2C
+
+#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
new file mode 100644
index 000000000000..65d83043bf63
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_CMDQ_REGS_H_
+#define ASIC_REG_TPC7_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC7_CMDQ_GLBL_CFG0 0xFC9000
+
+#define mmTPC7_CMDQ_GLBL_CFG1 0xFC9004
+
+#define mmTPC7_CMDQ_GLBL_PROT 0xFC9008
+
+#define mmTPC7_CMDQ_GLBL_ERR_CFG 0xFC900C
+
+#define mmTPC7_CMDQ_GLBL_ERR_ADDR_LO 0xFC9010
+
+#define mmTPC7_CMDQ_GLBL_ERR_ADDR_HI 0xFC9014
+
+#define mmTPC7_CMDQ_GLBL_ERR_WDATA 0xFC9018
+
+#define mmTPC7_CMDQ_GLBL_SECURE_PROPS 0xFC901C
+
+#define mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS 0xFC9020
+
+#define mmTPC7_CMDQ_GLBL_STS0 0xFC9024
+
+#define mmTPC7_CMDQ_GLBL_STS1 0xFC9028
+
+#define mmTPC7_CMDQ_CQ_CFG0 0xFC90B0
+
+#define mmTPC7_CMDQ_CQ_CFG1 0xFC90B4
+
+#define mmTPC7_CMDQ_CQ_ARUSER 0xFC90B8
+
+#define mmTPC7_CMDQ_CQ_PTR_LO 0xFC90C0
+
+#define mmTPC7_CMDQ_CQ_PTR_HI 0xFC90C4
+
+#define mmTPC7_CMDQ_CQ_TSIZE 0xFC90C8
+
+#define mmTPC7_CMDQ_CQ_CTL 0xFC90CC
+
+#define mmTPC7_CMDQ_CQ_PTR_LO_STS 0xFC90D4
+
+#define mmTPC7_CMDQ_CQ_PTR_HI_STS 0xFC90D8
+
+#define mmTPC7_CMDQ_CQ_TSIZE_STS 0xFC90DC
+
+#define mmTPC7_CMDQ_CQ_CTL_STS 0xFC90E0
+
+#define mmTPC7_CMDQ_CQ_STS0 0xFC90E4
+
+#define mmTPC7_CMDQ_CQ_STS1 0xFC90E8
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN 0xFC90F0
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xFC90F4
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT 0xFC90F8
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT 0xFC90FC
+
+#define mmTPC7_CMDQ_CQ_IFIFO_CNT 0xFC9108
+
+#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO 0xFC9120
+
+#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI 0xFC9124
+
+#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO 0xFC9128
+
+#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI 0xFC912C
+
+#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO 0xFC9130
+
+#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI 0xFC9134
+
+#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO 0xFC9138
+
+#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI 0xFC913C
+
+#define mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET 0xFC9140
+
+#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC9144
+
+#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC9148
+
+#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xFC914C
+
+#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xFC9150
+
+#define mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET 0xFC9154
+
+#define mmTPC7_CMDQ_CP_FENCE0_RDATA 0xFC9158
+
+#define mmTPC7_CMDQ_CP_FENCE1_RDATA 0xFC915C
+
+#define mmTPC7_CMDQ_CP_FENCE2_RDATA 0xFC9160
+
+#define mmTPC7_CMDQ_CP_FENCE3_RDATA 0xFC9164
+
+#define mmTPC7_CMDQ_CP_FENCE0_CNT 0xFC9168
+
+#define mmTPC7_CMDQ_CP_FENCE1_CNT 0xFC916C
+
+#define mmTPC7_CMDQ_CP_FENCE2_CNT 0xFC9170
+
+#define mmTPC7_CMDQ_CP_FENCE3_CNT 0xFC9174
+
+#define mmTPC7_CMDQ_CP_STS 0xFC9178
+
+#define mmTPC7_CMDQ_CP_CURRENT_INST_LO 0xFC917C
+
+#define mmTPC7_CMDQ_CP_CURRENT_INST_HI 0xFC9180
+
+#define mmTPC7_CMDQ_CP_BARRIER_CFG 0xFC9184
+
+#define mmTPC7_CMDQ_CP_DBG_0 0xFC9188
+
+#define mmTPC7_CMDQ_CQ_BUF_ADDR 0xFC9308
+
+#define mmTPC7_CMDQ_CQ_BUF_RDATA 0xFC930C
+
+#endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
new file mode 100644
index 000000000000..3d5848d87304
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_NRTR_REGS_H_
+#define ASIC_REG_TPC7_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmTPC7_NRTR_HBW_MAX_CRED 0xFC0100
+
+#define mmTPC7_NRTR_LBW_MAX_CRED 0xFC0120
+
+#define mmTPC7_NRTR_DBG_E_ARB 0xFC0300
+
+#define mmTPC7_NRTR_DBG_W_ARB 0xFC0304
+
+#define mmTPC7_NRTR_DBG_N_ARB 0xFC0308
+
+#define mmTPC7_NRTR_DBG_S_ARB 0xFC030C
+
+#define mmTPC7_NRTR_DBG_L_ARB 0xFC0310
+
+#define mmTPC7_NRTR_DBG_E_ARB_MAX 0xFC0320
+
+#define mmTPC7_NRTR_DBG_W_ARB_MAX 0xFC0324
+
+#define mmTPC7_NRTR_DBG_N_ARB_MAX 0xFC0328
+
+#define mmTPC7_NRTR_DBG_S_ARB_MAX 0xFC032C
+
+#define mmTPC7_NRTR_DBG_L_ARB_MAX 0xFC0330
+
+#define mmTPC7_NRTR_SPLIT_COEF_0 0xFC0400
+
+#define mmTPC7_NRTR_SPLIT_COEF_1 0xFC0404
+
+#define mmTPC7_NRTR_SPLIT_COEF_2 0xFC0408
+
+#define mmTPC7_NRTR_SPLIT_COEF_3 0xFC040C
+
+#define mmTPC7_NRTR_SPLIT_COEF_4 0xFC0410
+
+#define mmTPC7_NRTR_SPLIT_COEF_5 0xFC0414
+
+#define mmTPC7_NRTR_SPLIT_COEF_6 0xFC0418
+
+#define mmTPC7_NRTR_SPLIT_COEF_7 0xFC041C
+
+#define mmTPC7_NRTR_SPLIT_COEF_8 0xFC0420
+
+#define mmTPC7_NRTR_SPLIT_COEF_9 0xFC0424
+
+#define mmTPC7_NRTR_SPLIT_CFG 0xFC0440
+
+#define mmTPC7_NRTR_SPLIT_RD_SAT 0xFC0444
+
+#define mmTPC7_NRTR_SPLIT_RD_RST_TOKEN 0xFC0448
+
+#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_0 0xFC044C
+
+#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_1 0xFC0450
+
+#define mmTPC7_NRTR_SPLIT_WR_SAT 0xFC0454
+
+#define mmTPC7_NRTR_WPLIT_WR_TST_TOLEN 0xFC0458
+
+#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_0 0xFC045C
+
+#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_1 0xFC0460
+
+#define mmTPC7_NRTR_HBW_RANGE_HIT 0xFC0470
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_0 0xFC0480
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_1 0xFC0484
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_2 0xFC0488
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_3 0xFC048C
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_4 0xFC0490
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_5 0xFC0494
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_6 0xFC0498
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_7 0xFC049C
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_0 0xFC04A0
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_1 0xFC04A4
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_2 0xFC04A8
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_3 0xFC04AC
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_4 0xFC04B0
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_5 0xFC04B4
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_6 0xFC04B8
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_7 0xFC04BC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_0 0xFC04C0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_1 0xFC04C4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_2 0xFC04C8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_3 0xFC04CC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_4 0xFC04D0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_5 0xFC04D4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_6 0xFC04D8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_7 0xFC04DC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_0 0xFC04E0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_1 0xFC04E4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_2 0xFC04E8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_3 0xFC04EC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_4 0xFC04F0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_5 0xFC04F4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_6 0xFC04F8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_7 0xFC04FC
+
+#define mmTPC7_NRTR_LBW_RANGE_HIT 0xFC0500
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_0 0xFC0510
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_1 0xFC0514
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_2 0xFC0518
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_3 0xFC051C
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_4 0xFC0520
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_5 0xFC0524
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_6 0xFC0528
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_7 0xFC052C
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_8 0xFC0530
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_9 0xFC0534
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_10 0xFC0538
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_11 0xFC053C
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_12 0xFC0540
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_13 0xFC0544
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_14 0xFC0548
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_15 0xFC054C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_0 0xFC0550
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_1 0xFC0554
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_2 0xFC0558
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_3 0xFC055C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_4 0xFC0560
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_5 0xFC0564
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_6 0xFC0568
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_7 0xFC056C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_8 0xFC0570
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_9 0xFC0574
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_10 0xFC0578
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_11 0xFC057C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_12 0xFC0580
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_13 0xFC0584
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_14 0xFC0588
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_15 0xFC058C
+
+#define mmTPC7_NRTR_RGLTR 0xFC0590
+
+#define mmTPC7_NRTR_RGLTR_WR_RESULT 0xFC0594
+
+#define mmTPC7_NRTR_RGLTR_RD_RESULT 0xFC0598
+
+#define mmTPC7_NRTR_SCRAMB_EN 0xFC0600
+
+#define mmTPC7_NRTR_NON_LIN_SCRAMB 0xFC0604
+
+#endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
new file mode 100644
index 000000000000..25f5095f68fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_QM_REGS_H_
+#define ASIC_REG_TPC7_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC7_QM_GLBL_CFG0 0xFC8000
+
+#define mmTPC7_QM_GLBL_CFG1 0xFC8004
+
+#define mmTPC7_QM_GLBL_PROT 0xFC8008
+
+#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8010
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8014
+
+#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8018
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS 0xFC801C
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS 0xFC8020
+
+#define mmTPC7_QM_GLBL_STS0 0xFC8024
+
+#define mmTPC7_QM_GLBL_STS1 0xFC8028
+
+#define mmTPC7_QM_PQ_BASE_LO 0xFC8060
+
+#define mmTPC7_QM_PQ_BASE_HI 0xFC8064
+
+#define mmTPC7_QM_PQ_SIZE 0xFC8068
+
+#define mmTPC7_QM_PQ_PI 0xFC806C
+
+#define mmTPC7_QM_PQ_CI 0xFC8070
+
+#define mmTPC7_QM_PQ_CFG0 0xFC8074
+
+#define mmTPC7_QM_PQ_CFG1 0xFC8078
+
+#define mmTPC7_QM_PQ_ARUSER 0xFC807C
+
+#define mmTPC7_QM_PQ_PUSH0 0xFC8080
+
+#define mmTPC7_QM_PQ_PUSH1 0xFC8084
+
+#define mmTPC7_QM_PQ_PUSH2 0xFC8088
+
+#define mmTPC7_QM_PQ_PUSH3 0xFC808C
+
+#define mmTPC7_QM_PQ_STS0 0xFC8090
+
+#define mmTPC7_QM_PQ_STS1 0xFC8094
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_EN 0xFC80A0
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xFC80A4
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_SAT 0xFC80A8
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_TOUT 0xFC80AC
+
+#define mmTPC7_QM_CQ_CFG0 0xFC80B0
+
+#define mmTPC7_QM_CQ_CFG1 0xFC80B4
+
+#define mmTPC7_QM_CQ_ARUSER 0xFC80B8
+
+#define mmTPC7_QM_CQ_PTR_LO 0xFC80C0
+
+#define mmTPC7_QM_CQ_PTR_HI 0xFC80C4
+
+#define mmTPC7_QM_CQ_TSIZE 0xFC80C8
+
+#define mmTPC7_QM_CQ_CTL 0xFC80CC
+
+#define mmTPC7_QM_CQ_PTR_LO_STS 0xFC80D4
+
+#define mmTPC7_QM_CQ_PTR_HI_STS 0xFC80D8
+
+#define mmTPC7_QM_CQ_TSIZE_STS 0xFC80DC
+
+#define mmTPC7_QM_CQ_CTL_STS 0xFC80E0
+
+#define mmTPC7_QM_CQ_STS0 0xFC80E4
+
+#define mmTPC7_QM_CQ_STS1 0xFC80E8
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_EN 0xFC80F0
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xFC80F4
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_SAT 0xFC80F8
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_TOUT 0xFC80FC
+
+#define mmTPC7_QM_CQ_IFIFO_CNT 0xFC8108
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO 0xFC8120
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI 0xFC8124
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO 0xFC8128
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI 0xFC812C
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO 0xFC8130
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI 0xFC8134
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO 0xFC8138
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI 0xFC813C
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET 0xFC8140
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC8144
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC8148
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xFC814C
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xFC8150
+
+#define mmTPC7_QM_CP_LDMA_COMMIT_OFFSET 0xFC8154
+
+#define mmTPC7_QM_CP_FENCE0_RDATA 0xFC8158
+
+#define mmTPC7_QM_CP_FENCE1_RDATA 0xFC815C
+
+#define mmTPC7_QM_CP_FENCE2_RDATA 0xFC8160
+
+#define mmTPC7_QM_CP_FENCE3_RDATA 0xFC8164
+
+#define mmTPC7_QM_CP_FENCE0_CNT 0xFC8168
+
+#define mmTPC7_QM_CP_FENCE1_CNT 0xFC816C
+
+#define mmTPC7_QM_CP_FENCE2_CNT 0xFC8170
+
+#define mmTPC7_QM_CP_FENCE3_CNT 0xFC8174
+
+#define mmTPC7_QM_CP_STS 0xFC8178
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO 0xFC817C
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI 0xFC8180
+
+#define mmTPC7_QM_CP_BARRIER_CFG 0xFC8184
+
+#define mmTPC7_QM_CP_DBG_0 0xFC8188
+
+#define mmTPC7_QM_PQ_BUF_ADDR 0xFC8300
+
+#define mmTPC7_QM_PQ_BUF_RDATA 0xFC8304
+
+#define mmTPC7_QM_CQ_BUF_ADDR 0xFC8308
+
+#define mmTPC7_QM_CQ_BUF_RDATA 0xFC830C
+
+#endif /* ASIC_REG_TPC7_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
new file mode 100644
index 000000000000..920231d0afa5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC_PLL_REGS_H_
+#define ASIC_REG_TPC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * TPC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmTPC_PLL_NR 0xE01100
+
+#define mmTPC_PLL_NF 0xE01104
+
+#define mmTPC_PLL_OD 0xE01108
+
+#define mmTPC_PLL_NB 0xE0110C
+
+#define mmTPC_PLL_CFG 0xE01110
+
+#define mmTPC_PLL_LOSE_MASK 0xE01120
+
+#define mmTPC_PLL_LOCK_INTR 0xE01128
+
+#define mmTPC_PLL_LOCK_BYPASS 0xE0112C
+
+#define mmTPC_PLL_DATA_CHNG 0xE01130
+
+#define mmTPC_PLL_RST 0xE01134
+
+#define mmTPC_PLL_SLIP_WD_CNTR 0xE01150
+
+#define mmTPC_PLL_DIV_FACTOR_0 0xE01200
+
+#define mmTPC_PLL_DIV_FACTOR_1 0xE01204
+
+#define mmTPC_PLL_DIV_FACTOR_2 0xE01208
+
+#define mmTPC_PLL_DIV_FACTOR_3 0xE0120C
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_0 0xE01220
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_1 0xE01224
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_2 0xE01228
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_3 0xE0122C
+
+#define mmTPC_PLL_DIV_SEL_0 0xE01280
+
+#define mmTPC_PLL_DIV_SEL_1 0xE01284
+
+#define mmTPC_PLL_DIV_SEL_2 0xE01288
+
+#define mmTPC_PLL_DIV_SEL_3 0xE0128C
+
+#define mmTPC_PLL_DIV_EN_0 0xE012A0
+
+#define mmTPC_PLL_DIV_EN_1 0xE012A4
+
+#define mmTPC_PLL_DIV_EN_2 0xE012A8
+
+#define mmTPC_PLL_DIV_EN_3 0xE012AC
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_0 0xE012C0
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_1 0xE012C4
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_2 0xE012C8
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_3 0xE012CC
+
+#define mmTPC_PLL_CLK_GATER 0xE01300
+
+#define mmTPC_PLL_CLK_RLX_0 0xE01310
+
+#define mmTPC_PLL_CLK_RLX_1 0xE01314
+
+#define mmTPC_PLL_CLK_RLX_2 0xE01318
+
+#define mmTPC_PLL_CLK_RLX_3 0xE0131C
+
+#define mmTPC_PLL_REF_CNTR_PERIOD 0xE01400
+
+#define mmTPC_PLL_REF_LOW_THRESHOLD 0xE01410
+
+#define mmTPC_PLL_REF_HIGH_THRESHOLD 0xE01420
+
+#define mmTPC_PLL_PLL_NOT_STABLE 0xE01430
+
+#define mmTPC_PLL_FREQ_CALC_EN 0xE01440
+
+#endif /* ASIC_REG_TPC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
new file mode 100644
index 000000000000..614149efa412
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_H
+#define GOYA_H
+
+#include "asic_reg/goya_regs.h"
+
+#include <linux/types.h>
+
+#define SRAM_CFG_BAR_ID 0
+#define MSIX_BAR_ID 2
+#define DDR_BAR_ID 4
+
+#define CFG_BAR_SIZE 0x10000000ull /* 256MB */
+#define MSIX_BAR_SIZE 0x1000ull /* 4KB */
+
+#define CFG_BASE 0x7FFC000000ull
+#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG */
+
+#define SRAM_BASE_ADDR 0x7FF0000000ull
+#define SRAM_SIZE 0x32A0000 /* 50.625MB */
+
+#define DRAM_PHYS_BASE 0x0ull
+
+#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */
+#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */
+
+#define GOYA_MSIX_ENTRIES 8
+
+#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
+
+#define MAX_ASID 1024
+
+#define PROT_BITS_OFFS 0xF80
+
+#define DMA_MAX_NUM 5
+
+#define TPC_MAX_NUM 8
+
+#endif /* GOYA_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
new file mode 100644
index 000000000000..497937a17ee9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef __GOYA_ASYNC_EVENTS_H_
+#define __GOYA_ASYNC_EVENTS_H_
+
+enum goya_async_event_id {
+ GOYA_ASYNC_EVENT_ID_PCIE_IF = 33,
+ GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36,
+ GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39,
+ GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42,
+ GOYA_ASYNC_EVENT_ID_TPC3_ECC = 45,
+ GOYA_ASYNC_EVENT_ID_TPC4_ECC = 48,
+ GOYA_ASYNC_EVENT_ID_TPC5_ECC = 51,
+ GOYA_ASYNC_EVENT_ID_TPC6_ECC = 54,
+ GOYA_ASYNC_EVENT_ID_TPC7_ECC = 57,
+ GOYA_ASYNC_EVENT_ID_MME_ECC = 60,
+ GOYA_ASYNC_EVENT_ID_MME_ECC_EXT = 61,
+ GOYA_ASYNC_EVENT_ID_MMU_ECC = 63,
+ GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64,
+ GOYA_ASYNC_EVENT_ID_DMA_ECC = 66,
+ GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75,
+ GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78,
+ GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79,
+ GOYA_ASYNC_EVENT_ID_SRAM0 = 81,
+ GOYA_ASYNC_EVENT_ID_SRAM1 = 82,
+ GOYA_ASYNC_EVENT_ID_SRAM2 = 83,
+ GOYA_ASYNC_EVENT_ID_SRAM3 = 84,
+ GOYA_ASYNC_EVENT_ID_SRAM4 = 85,
+ GOYA_ASYNC_EVENT_ID_SRAM5 = 86,
+ GOYA_ASYNC_EVENT_ID_SRAM6 = 87,
+ GOYA_ASYNC_EVENT_ID_SRAM7 = 88,
+ GOYA_ASYNC_EVENT_ID_SRAM8 = 89,
+ GOYA_ASYNC_EVENT_ID_SRAM9 = 90,
+ GOYA_ASYNC_EVENT_ID_SRAM10 = 91,
+ GOYA_ASYNC_EVENT_ID_SRAM11 = 92,
+ GOYA_ASYNC_EVENT_ID_SRAM12 = 93,
+ GOYA_ASYNC_EVENT_ID_SRAM13 = 94,
+ GOYA_ASYNC_EVENT_ID_SRAM14 = 95,
+ GOYA_ASYNC_EVENT_ID_SRAM15 = 96,
+ GOYA_ASYNC_EVENT_ID_SRAM16 = 97,
+ GOYA_ASYNC_EVENT_ID_SRAM17 = 98,
+ GOYA_ASYNC_EVENT_ID_SRAM18 = 99,
+ GOYA_ASYNC_EVENT_ID_SRAM19 = 100,
+ GOYA_ASYNC_EVENT_ID_SRAM20 = 101,
+ GOYA_ASYNC_EVENT_ID_SRAM21 = 102,
+ GOYA_ASYNC_EVENT_ID_SRAM22 = 103,
+ GOYA_ASYNC_EVENT_ID_SRAM23 = 104,
+ GOYA_ASYNC_EVENT_ID_SRAM24 = 105,
+ GOYA_ASYNC_EVENT_ID_SRAM25 = 106,
+ GOYA_ASYNC_EVENT_ID_SRAM26 = 107,
+ GOYA_ASYNC_EVENT_ID_SRAM27 = 108,
+ GOYA_ASYNC_EVENT_ID_SRAM28 = 109,
+ GOYA_ASYNC_EVENT_ID_SRAM29 = 110,
+ GOYA_ASYNC_EVENT_ID_GIC500 = 112,
+ GOYA_ASYNC_EVENT_ID_PCIE_DEC = 115,
+ GOYA_ASYNC_EVENT_ID_TPC0_DEC = 117,
+ GOYA_ASYNC_EVENT_ID_TPC1_DEC = 120,
+ GOYA_ASYNC_EVENT_ID_TPC2_DEC = 123,
+ GOYA_ASYNC_EVENT_ID_TPC3_DEC = 126,
+ GOYA_ASYNC_EVENT_ID_TPC4_DEC = 129,
+ GOYA_ASYNC_EVENT_ID_TPC5_DEC = 132,
+ GOYA_ASYNC_EVENT_ID_TPC6_DEC = 135,
+ GOYA_ASYNC_EVENT_ID_TPC7_DEC = 138,
+ GOYA_ASYNC_EVENT_ID_AXI_ECC = 139,
+ GOYA_ASYNC_EVENT_ID_L2_RAM_ECC = 140,
+ GOYA_ASYNC_EVENT_ID_MME_WACS = 141,
+ GOYA_ASYNC_EVENT_ID_MME_WACSD = 142,
+ GOYA_ASYNC_EVENT_ID_PLL0 = 143,
+ GOYA_ASYNC_EVENT_ID_PLL1 = 144,
+ GOYA_ASYNC_EVENT_ID_PLL3 = 146,
+ GOYA_ASYNC_EVENT_ID_PLL4 = 147,
+ GOYA_ASYNC_EVENT_ID_PLL5 = 148,
+ GOYA_ASYNC_EVENT_ID_PLL6 = 149,
+ GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER = 155,
+ GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC = 159,
+ GOYA_ASYNC_EVENT_ID_PSOC = 160,
+ GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171,
+ GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG3 = 177,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG0 = 178,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG1 = 179,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG2 = 180,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG3 = 181,
+ GOYA_ASYNC_EVENT_ID_PCIE_APB = 182,
+ GOYA_ASYNC_EVENT_ID_PCIE_QDB = 183,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_D_P_WR = 184,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_D_RD = 185,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_U_P_WR = 186,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_U_RD = 187,
+ GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU = 190,
+ GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR = 191,
+ GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU = 200,
+ GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR = 201,
+ GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU = 210,
+ GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR = 211,
+ GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU = 220,
+ GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR = 221,
+ GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU = 230,
+ GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR = 231,
+ GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU = 240,
+ GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR = 241,
+ GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU = 250,
+ GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR = 251,
+ GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU = 260,
+ GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR = 261,
+ GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU0 = 270,
+ GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU1 = 271,
+ GOYA_ASYNC_EVENT_ID_MME_WACS_UP = 272,
+ GOYA_ASYNC_EVENT_ID_MME_WACS_DOWN = 273,
+ GOYA_ASYNC_EVENT_ID_MMU_PAGE_FAULT = 280,
+ GOYA_ASYNC_EVENT_ID_MMU_WR_PERM = 281,
+ GOYA_ASYNC_EVENT_ID_MMU_DBG_BM = 282,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 = 290,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH1 = 291,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH2 = 292,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH3 = 293,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 = 294,
+ GOYA_ASYNC_EVENT_ID_DDR0_PHY_DFI = 300,
+ GOYA_ASYNC_EVENT_ID_DDR0_ECC_SCRUB = 301,
+ GOYA_ASYNC_EVENT_ID_DDR0_DB_ECC = 302,
+ GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC = 303,
+ GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC_MC = 304,
+ GOYA_ASYNC_EVENT_ID_DDR0_AXI_RD = 305,
+ GOYA_ASYNC_EVENT_ID_DDR0_AXI_WR = 306,
+ GOYA_ASYNC_EVENT_ID_DDR1_PHY_DFI = 310,
+ GOYA_ASYNC_EVENT_ID_DDR1_ECC_SCRUB = 311,
+ GOYA_ASYNC_EVENT_ID_DDR1_DB_ECC = 312,
+ GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC = 313,
+ GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC_MC = 314,
+ GOYA_ASYNC_EVENT_ID_DDR1_AXI_RD = 315,
+ GOYA_ASYNC_EVENT_ID_DDR1_AXI_WR = 316,
+ GOYA_ASYNC_EVENT_ID_CPU_BMON = 320,
+ GOYA_ASYNC_EVENT_ID_TS_EAST = 322,
+ GOYA_ASYNC_EVENT_ID_TS_WEST = 323,
+ GOYA_ASYNC_EVENT_ID_TS_NORTH = 324,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361,
+ GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430,
+ GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431,
+ GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432,
+ GOYA_ASYNC_EVENT_ID_TPC3_CMDQ = 433,
+ GOYA_ASYNC_EVENT_ID_TPC4_CMDQ = 434,
+ GOYA_ASYNC_EVENT_ID_TPC5_CMDQ = 435,
+ GOYA_ASYNC_EVENT_ID_TPC6_CMDQ = 436,
+ GOYA_ASYNC_EVENT_ID_TPC7_CMDQ = 437,
+ GOYA_ASYNC_EVENT_ID_TPC0_QM = 438,
+ GOYA_ASYNC_EVENT_ID_TPC1_QM = 439,
+ GOYA_ASYNC_EVENT_ID_TPC2_QM = 440,
+ GOYA_ASYNC_EVENT_ID_TPC3_QM = 441,
+ GOYA_ASYNC_EVENT_ID_TPC4_QM = 442,
+ GOYA_ASYNC_EVENT_ID_TPC5_QM = 443,
+ GOYA_ASYNC_EVENT_ID_TPC6_QM = 444,
+ GOYA_ASYNC_EVENT_ID_TPC7_QM = 445,
+ GOYA_ASYNC_EVENT_ID_MME_QM = 447,
+ GOYA_ASYNC_EVENT_ID_MME_CMDQ = 448,
+ GOYA_ASYNC_EVENT_ID_DMA0_QM = 449,
+ GOYA_ASYNC_EVENT_ID_DMA1_QM = 450,
+ GOYA_ASYNC_EVENT_ID_DMA2_QM = 451,
+ GOYA_ASYNC_EVENT_ID_DMA3_QM = 452,
+ GOYA_ASYNC_EVENT_ID_DMA4_QM = 453,
+ GOYA_ASYNC_EVENT_ID_DMA_ON_HBW = 454,
+ GOYA_ASYNC_EVENT_ID_DMA0_CH = 455,
+ GOYA_ASYNC_EVENT_ID_DMA1_CH = 456,
+ GOYA_ASYNC_EVENT_ID_DMA2_CH = 457,
+ GOYA_ASYNC_EVENT_ID_DMA3_CH = 458,
+ GOYA_ASYNC_EVENT_ID_DMA4_CH = 459,
+ GOYA_ASYNC_EVENT_ID_PI_UPDATE = 484,
+ GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485,
+ GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486,
+ GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487,
+ GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023,
+ GOYA_ASYNC_EVENT_ID_SIZE
+};
+
+#endif /* __GOYA_ASYNC_EVENTS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
new file mode 100644
index 000000000000..a9920cb4a07b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_FW_IF_H
+#define GOYA_FW_IF_H
+
+#define CPU_BOOT_ADDR 0x7FF8040000ull
+
+#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
+#define LINUX_FW_OFFSET 0x800000 /* 8MB in DDR */
+
+enum goya_pll_index {
+ CPU_PLL = 0,
+ IC_PLL,
+ MC_PLL,
+ MME_PLL,
+ PCI_PLL,
+ EMMC_PLL,
+ TPC_PLL
+};
+
+#define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */
+
+#endif /* GOYA_FW_IF_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
new file mode 100644
index 000000000000..a14407b975e4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2017-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_PACKETS_H
+#define GOYA_PACKETS_H
+
+#include <linux/types.h>
+
+#define PACKET_HEADER_PACKET_ID_SHIFT 56
+#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull
+
+enum packet_id {
+ PACKET_WREG_32 = 0x1,
+ PACKET_WREG_BULK = 0x2,
+ PACKET_MSG_LONG = 0x3,
+ PACKET_MSG_SHORT = 0x4,
+ PACKET_CP_DMA = 0x5,
+ PACKET_MSG_PROT = 0x7,
+ PACKET_FENCE = 0x8,
+ PACKET_LIN_DMA = 0x9,
+ PACKET_NOP = 0xA,
+ PACKET_STOP = 0xB,
+ MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >>
+ PACKET_HEADER_PACKET_ID_SHIFT) + 1
+};
+
+enum goya_dma_direction {
+ DMA_HOST_TO_DRAM,
+ DMA_HOST_TO_SRAM,
+ DMA_DRAM_TO_SRAM,
+ DMA_SRAM_TO_DRAM,
+ DMA_SRAM_TO_HOST,
+ DMA_DRAM_TO_HOST,
+ DMA_DRAM_TO_DRAM,
+ DMA_SRAM_TO_SRAM,
+ DMA_ENUM_MAX
+};
+
+#define GOYA_PKT_CTL_OPCODE_SHIFT 24
+#define GOYA_PKT_CTL_OPCODE_MASK 0x1F000000
+
+#define GOYA_PKT_CTL_EB_SHIFT 29
+#define GOYA_PKT_CTL_EB_MASK 0x20000000
+
+#define GOYA_PKT_CTL_RB_SHIFT 30
+#define GOYA_PKT_CTL_RB_MASK 0x40000000
+
+#define GOYA_PKT_CTL_MB_SHIFT 31
+#define GOYA_PKT_CTL_MB_MASK 0x80000000
+
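+/*
+ * Illustrative sketch (not part of the original header): composing the
+ * common ctl dword of a packet from its opcode and the EB/RB/MB bits, using
+ * the shift/mask pairs above. The helper name is hypothetical.
+ */
+static inline u32 goya_pkt_ctl(enum packet_id id, bool eb, bool rb, bool mb)
+{
+ u32 ctl = (id << GOYA_PKT_CTL_OPCODE_SHIFT) & GOYA_PKT_CTL_OPCODE_MASK;
+
+ if (eb)
+ ctl |= GOYA_PKT_CTL_EB_MASK;
+ if (rb)
+ ctl |= GOYA_PKT_CTL_RB_MASK;
+ if (mb)
+ ctl |= GOYA_PKT_CTL_MB_MASK;
+
+ return ctl;
+}
+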
+struct packet_nop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_stop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+#define GOYA_PKT_WREG32_CTL_REG_OFFSET_SHIFT 0
+#define GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK 0x0000FFFF
+
+struct packet_wreg32 {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_wreg_bulk {
+ __le32 size64;
+ __le32 ctl;
+ __le64 values[0]; /* data starts here */
+};
+
+struct packet_msg_long {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+struct packet_msg_short {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_msg_prot {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+struct packet_fence {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+#define GOYA_PKT_LIN_DMA_CTL_WO_SHIFT 0
+#define GOYA_PKT_LIN_DMA_CTL_WO_MASK 0x00000001
+
+#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_SHIFT 1
+#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK 0x00000002
+
+#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_SHIFT 2
+#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK 0x00000004
+
+#define GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT 6
+#define GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000040
+
+#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT 20
+#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK 0x00700000
+
+struct packet_lin_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct packet_cp_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+#endif /* GOYA_PACKETS_H */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
new file mode 100644
index 000000000000..7475732b9996
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HL_BOOT_IF_H
+#define HL_BOOT_IF_H
+
+enum cpu_boot_status {
+ CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
+ CPU_BOOT_STATUS_IN_WFE,
+ CPU_BOOT_STATUS_DRAM_RDY,
+ CPU_BOOT_STATUS_SRAM_AVAIL,
+ CPU_BOOT_STATUS_IN_BTL, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT,
+ CPU_BOOT_STATUS_IN_SPL,
+ CPU_BOOT_STATUS_IN_UBOOT,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL,
+ CPU_BOOT_STATUS_FIT_CORRUPTED
+};
+
+enum kmd_msg {
+ KMD_MSG_NA = 0,
+ KMD_MSG_GOTO_WFE,
+ KMD_MSG_FIT_RDY
+};
+
+#endif /* HL_BOOT_IF_H */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
new file mode 100644
index 000000000000..b680052ee3f0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_GENERAL_H_
+#define INCLUDE_MMU_GENERAL_H_
+
+#define PAGE_SHIFT_4KB 12
+#define PAGE_SHIFT_2MB 21
+#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
+#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
+#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
+
+#define PAGE_PRESENT_MASK 0x0000000000001
+#define SWAP_OUT_MASK 0x0000000000004
+#define LAST_MASK 0x0000000000800
+#define PHYS_ADDR_MASK 0x3FFFFFFFFF000ull
+#define HOP0_MASK 0x3000000000000ull
+#define HOP1_MASK 0x0FF8000000000ull
+#define HOP2_MASK 0x0007FC0000000ull
+#define HOP3_MASK 0x000003FE00000
+#define HOP4_MASK 0x00000001FF000
+#define OFFSET_MASK 0x0000000000FFF
+
+#define HOP0_SHIFT 48
+#define HOP1_SHIFT 39
+#define HOP2_SHIFT 30
+#define HOP3_SHIFT 21
+#define HOP4_SHIFT 12
+
+#define PTE_PHYS_ADDR_SHIFT 12
+#define PTE_PHYS_ADDR_MASK ~0xFFF
+
+#define HL_PTE_SIZE sizeof(u64)
+#define HOP_TABLE_SIZE PAGE_SIZE_4KB
+#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE)
+#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID)
+
+#define MMU_HOP0_PA43_12_SHIFT 12
+#define MMU_HOP0_PA49_44_SHIFT (12 + 32)
+
+#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */
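+
+/*
+ * Illustrative macro (not part of the original header): extracting the hop-0
+ * page-table index from a device virtual address with the mask/shift pairs
+ * above; the other hops follow the same pattern. The macro name is
+ * hypothetical.
+ */
+#define GET_HOP0_INDEX(virt_addr) (((virt_addr) & HOP0_MASK) >> HOP0_SHIFT)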
+
+#endif /* INCLUDE_MMU_GENERAL_H_ */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
new file mode 100644
index 000000000000..8539dd041f2c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_V1_0_H_
+#define INCLUDE_MMU_V1_0_H_
+
+#define MMU_HOP0_PA43_12 0x490004
+#define MMU_HOP0_PA49_44 0x490008
+#define MMU_ASID_BUSY 0x490000
+
+#endif /* INCLUDE_MMU_V1_0_H_ */
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h
new file mode 100644
index 000000000000..bf59bbe27fdc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/qman_if.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef QMAN_IF_H
+#define QMAN_IF_H
+
+#include <linux/types.h>
+
+/*
+ * PRIMARY QUEUE
+ */
+
+struct hl_bd {
+ __le64 ptr;
+ __le32 len;
+ __le32 ctl;
+};
+
+#define HL_BD_SIZE sizeof(struct hl_bd)
+
+/*
+ * BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is
+ * valid. 1 means the repeat field is valid, 0 means it is not valid,
+ * i.e. the repeat count is treated as 1
+ */
+#define BD_CTL_REPEAT_VALID_SHIFT 24
+#define BD_CTL_REPEAT_VALID_MASK 0x01000000
+
+#define BD_CTL_SHADOW_INDEX_SHIFT 0
+#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF
+
+/*
+ * COMPLETION QUEUE
+ */
+
+struct hl_cq_entry {
+ __le32 data;
+};
+
+#define HL_CQ_ENTRY_SIZE sizeof(struct hl_cq_entry)
+
+#define CQ_ENTRY_READY_SHIFT 31
+#define CQ_ENTRY_READY_MASK 0x80000000
+
+#define CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT 30
+#define CQ_ENTRY_SHADOW_INDEX_VALID_MASK 0x40000000
+
+#define CQ_ENTRY_SHADOW_INDEX_SHIFT BD_CTL_SHADOW_INDEX_SHIFT
+#define CQ_ENTRY_SHADOW_INDEX_MASK BD_CTL_SHADOW_INDEX_MASK
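+
+/*
+ * Illustrative helper (not part of the original interface header): decoding
+ * the shadow index of a completion queue entry with the masks above. The CQ
+ * IRQ handler open-codes the same extraction; the helper name here is
+ * hypothetical.
+ */
+static inline u16 hl_cq_entry_shadow_index(u32 entry)
+{
+ return (u16) ((entry & CQ_ENTRY_SHADOW_INDEX_MASK) >>
+ CQ_ENTRY_SHADOW_INDEX_SHIFT);
+}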
+
+#endif /* QMAN_IF_H */
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
new file mode 100644
index 000000000000..e69a09c10e3f
--- /dev/null
+++ b/drivers/misc/habanalabs/irq.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+/**
+ * struct hl_eqe_work - used to schedule the work of an EQ entry and an
+ * armcp_reset event
+ *
+ * @eq_work: workqueue object to run when EQ entry is received
+ * @hdev: pointer to device structure
+ * @eq_entry: copy of the EQ entry
+ */
+struct hl_eqe_work {
+ struct work_struct eq_work;
+ struct hl_device *hdev;
+ struct hl_eq_entry eq_entry;
+};
+
+/*
+ * hl_cq_inc_ptr - increment ci or pi of cq
+ *
+ * @ptr: the current ci or pi value of the completion queue
+ *
+ * Increment ptr by 1. If it reaches the number of completion queue
+ * entries, set it to 0
+ */
+inline u32 hl_cq_inc_ptr(u32 ptr)
+{
+ ptr++;
+ if (unlikely(ptr == HL_CQ_LENGTH))
+ ptr = 0;
+ return ptr;
+}
+
+/*
+ * hl_eq_inc_ptr - increment ci of eq
+ *
+ * @ptr: the current ci value of the event queue
+ *
+ * Increment ptr by 1. If it reaches the number of event queue
+ * entries, set it to 0
+ */
+inline u32 hl_eq_inc_ptr(u32 ptr)
+{
+ ptr++;
+ if (unlikely(ptr == HL_EQ_LENGTH))
+ ptr = 0;
+ return ptr;
+}
+
+static void irq_handle_eqe(struct work_struct *work)
+{
+ struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
+ eq_work);
+ struct hl_device *hdev = eqe_work->hdev;
+
+ hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);
+
+ kfree(eqe_work);
+}
+
+/*
+ * hl_irq_handler_cq - irq handler for completion queue
+ *
+ * @irq: irq number
+ * @arg: pointer to completion queue structure
+ *
+ */
+irqreturn_t hl_irq_handler_cq(int irq, void *arg)
+{
+ struct hl_cq *cq = arg;
+ struct hl_device *hdev = cq->hdev;
+ struct hl_hw_queue *queue;
+ struct hl_cs_job *job;
+ bool shadow_index_valid;
+ u16 shadow_index;
+ u32 *cq_entry;
+ u32 *cq_base;
+
+ if (hdev->disabled) {
+ dev_dbg(hdev->dev,
+ "Device disabled but received IRQ %d for CQ %d\n",
+ irq, cq->hw_queue_id);
+ return IRQ_HANDLED;
+ }
+
+ cq_base = (u32 *) (uintptr_t) cq->kernel_address;
+
+ while (1) {
+ bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
+ >> CQ_ENTRY_READY_SHIFT);
+
+ if (!entry_ready)
+ break;
+
+ cq_entry = (u32 *) &cq_base[cq->ci];
+
+ /*
+ * Make sure we read CQ entry contents after we've
+ * checked the ownership bit.
+ */
+ dma_rmb();
+
+ shadow_index_valid =
+ ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
+ >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
+
+ shadow_index = (u16)
+ ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
+ >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
+
+ queue = &hdev->kernel_queues[cq->hw_queue_id];
+
+ if ((shadow_index_valid) && (!hdev->disabled)) {
+ job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
+ queue_work(hdev->cq_wq, &job->finish_work);
+ }
+
+ /*
+ * Update ci of the context's queue. There is no
+ * need to protect it with spinlock because this update is
+ * done only inside IRQ and there is a different IRQ per
+ * queue
+ */
+ queue->ci = hl_queue_inc_ptr(queue->ci);
+
+ /* Clear CQ entry ready bit */
+ cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
+
+ cq->ci = hl_cq_inc_ptr(cq->ci);
+
+ /* Increment free slots */
+ atomic_inc(&cq->free_slots_cnt);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * hl_irq_handler_eq - irq handler for event queue
+ *
+ * @irq: irq number
+ * @arg: pointer to event queue structure
+ *
+ */
+irqreturn_t hl_irq_handler_eq(int irq, void *arg)
+{
+ struct hl_eq *eq = arg;
+ struct hl_device *hdev = eq->hdev;
+ struct hl_eq_entry *eq_entry;
+ struct hl_eq_entry *eq_base;
+ struct hl_eqe_work *handle_eqe_work;
+
+ eq_base = (struct hl_eq_entry *) (uintptr_t) eq->kernel_address;
+
+ while (1) {
+ bool entry_ready =
+ ((__le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
+ EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
+
+ if (!entry_ready)
+ break;
+
+ eq_entry = &eq_base[eq->ci];
+
+ /*
+ * Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ dma_rmb();
+
+ if (hdev->disabled) {
+ dev_warn(hdev->dev,
+ "Device disabled but received IRQ %d for EQ\n",
+ irq);
+ goto skip_irq;
+ }
+
+ handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
+ if (handle_eqe_work) {
+ INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
+ handle_eqe_work->hdev = hdev;
+
+ memcpy(&handle_eqe_work->eq_entry, eq_entry,
+ sizeof(*eq_entry));
+
+ queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
+ }
+skip_irq:
+ /* Clear EQ entry ready bit */
+ eq_entry->hdr.ctl =
+ __cpu_to_le32(__le32_to_cpu(eq_entry->hdr.ctl) &
+ ~EQ_CTL_READY_MASK);
+
+ eq->ci = hl_eq_inc_ptr(eq->ci);
+
+ hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * hl_cq_init - main initialization function for a cq object
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to cq structure
+ * @hw_queue_id: The H/W queue ID this completion queue belongs to
+ *
+ * Allocate dma-able memory for the completion queue and initialize fields
+ * Returns 0 on success
+ */
+int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
+{
+ void *p;
+
+ BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
+
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+ &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->hdev = hdev;
+ q->kernel_address = (u64) (uintptr_t) p;
+ q->hw_queue_id = hw_queue_id;
+ q->ci = 0;
+ q->pi = 0;
+
+ atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
+
+ return 0;
+}
+
+/*
+ * hl_cq_fini - destroy completion queue
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to cq structure
+ *
+ * Free the completion queue memory
+ */
+void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
+{
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+}
+
+void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
+{
+ q->ci = 0;
+ q->pi = 0;
+
+ atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
+
+ /*
+ * It's not enough to just reset the PI/CI because the H/W may have
+ * written valid completion entries before it was halted and therefore
+ * we need to clean the actual queues so we won't process old entries
+ * when the device is operational again
+ */
+
+ memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
+}
+
+/*
+ * hl_eq_init - main initialization function for an event queue object
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to eq structure
+ *
+ * Allocate dma-able memory for the event queue and initialize fields
+ * Returns 0 on success
+ */
+int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
+{
+ void *p;
+
+ BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
+
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
+ &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->hdev = hdev;
+ q->kernel_address = (u64) (uintptr_t) p;
+ q->ci = 0;
+
+ return 0;
+}
+
+/*
+ * hl_eq_fini - destroy event queue
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to eq structure
+ *
+ * Free the event queue memory
+ */
+void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
+{
+ flush_workqueue(hdev->eq_wq);
+
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+}
+
+void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
+{
+ q->ci = 0;
+
+ /*
+ * It's not enough to just reset the PI/CI because the H/W may have
+ * written valid completion entries before it was halted and therefore
+ * we need to clean the actual queues so we won't process old entries
+ * when the device is operational again
+ */
+
+ memset((void *) (uintptr_t) q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
+}
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
new file mode 100644
index 000000000000..3a12fd1a5274
--- /dev/null
+++ b/drivers/misc/habanalabs/memory.c
@@ -0,0 +1,1723 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+
+#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
+#define HL_MMU_DEBUG 0
+
+/*
+ * The va ranges in the context object contain a list with the available
+ * chunks of device virtual memory.
+ * There is one range for host allocations and one for DRAM allocations.
+ *
+ * On initialization each range contains one chunk covering all of its
+ * available virtual range, which is half of the total device virtual range.
+ *
+ * On each mapping of physical pages, a suitable virtual range chunk (the
+ * smallest one that is big enough) is selected from the list. If the chunk
+ * size equals the requested size, the chunk is returned. Otherwise, the chunk
+ * is split into two chunks - one to return as the result and a remainder that
+ * stays in the list.
+ *
+ * On each unmapping of a virtual address, the relevant virtual chunk is
+ * returned to the list. The chunk is added to the list and if its edges match
+ * the edges of the adjacent chunks (meaning a contiguous chunk can be
+ * created), the chunks are merged.
+ *
+ * On finish, the list is checked to contain only one chunk covering the whole
+ * relevant virtual range (which is half of the device's total virtual range).
+ * If not (meaning not all mappings were unmapped), a warning is printed.
+ */
+
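+/*
+ * Worked illustration (hypothetical numbers, not part of the original
+ * comment): if a range holds a single free chunk 0x20000000-0x3FFFFFFF and a
+ * 0x1000-byte mapping is requested, the chunk is split into the returned
+ * block 0x20000000-0x20000FFF and a remaining free chunk
+ * 0x20001000-0x3FFFFFFF. Unmapping the block later returns it to the list
+ * and, because its end touches the start of the remaining free chunk, the
+ * two are merged back into a single chunk.
+ */
+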
+/*
+ * alloc_device_memory - allocate device memory
+ *
+ * @ctx : current context
+ * @args : host parameters containing the requested size
+ * @ret_handle : result handle
+ *
+ * This function does the following:
+ * - Allocate the requested size rounded up to 2MB pages
+ * - Return unique handle
+ */
+static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
+ u32 *ret_handle)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ u64 paddr = 0;
+ u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
+ int handle, rc, i;
+ bool contiguous;
+
+ num_curr_pgs = 0;
+ page_size = hdev->asic_prop.dram_page_size;
+ page_shift = __ffs(page_size);
+ num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
+ total_size = num_pgs << page_shift;
+
+ contiguous = args->flags & HL_MEM_CONTIGUOUS;
+
+ if (contiguous) {
+ paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
+ if (!paddr) {
+ dev_err(hdev->dev,
+ "failed to allocate %u huge contiguous pages\n",
+ num_pgs);
+ return -ENOMEM;
+ }
+ }
+
+ phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
+ if (!phys_pg_pack) {
+ rc = -ENOMEM;
+ goto pages_pack_err;
+ }
+
+ phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
+ phys_pg_pack->asid = ctx->asid;
+ phys_pg_pack->npages = num_pgs;
+ phys_pg_pack->page_size = page_size;
+ phys_pg_pack->total_size = total_size;
+ phys_pg_pack->flags = args->flags;
+ phys_pg_pack->contiguous = contiguous;
+
+ phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+ if (!phys_pg_pack->pages) {
+ rc = -ENOMEM;
+ goto pages_arr_err;
+ }
+
+ if (phys_pg_pack->contiguous) {
+ for (i = 0 ; i < num_pgs ; i++)
+ phys_pg_pack->pages[i] = paddr + i * page_size;
+ } else {
+ for (i = 0 ; i < num_pgs ; i++) {
+ phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
+ vm->dram_pg_pool,
+ page_size);
+ if (!phys_pg_pack->pages[i]) {
+ dev_err(hdev->dev,
+ "ioctl failed to allocate page\n");
+ rc = -ENOMEM;
+ goto page_err;
+ }
+
+ num_curr_pgs++;
+ }
+ }
+
+ spin_lock(&vm->idr_lock);
+ handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
+ GFP_ATOMIC);
+ spin_unlock(&vm->idr_lock);
+
+ if (handle < 0) {
+ dev_err(hdev->dev, "Failed to get handle for page\n");
+ rc = -EFAULT;
+ goto idr_err;
+ }
+
+ for (i = 0 ; i < num_pgs ; i++)
+ kref_get(&vm->dram_pg_pool_refcount);
+
+ phys_pg_pack->handle = handle;
+
+ atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
+ atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
+
+ *ret_handle = handle;
+
+ return 0;
+
+idr_err:
+page_err:
+ if (!phys_pg_pack->contiguous)
+ for (i = 0 ; i < num_curr_pgs ; i++)
+ gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
+ page_size);
+
+ kfree(phys_pg_pack->pages);
+pages_arr_err:
+ kfree(phys_pg_pack);
+pages_pack_err:
+ if (contiguous)
+ gen_pool_free(vm->dram_pg_pool, paddr, total_size);
+
+ return rc;
+}
+
+/*
+ * get_userptr_from_host_va - initialize userptr structure from given host
+ * virtual address
+ *
+ * @hdev : habanalabs device structure
+ * @args : parameters containing the virtual address and size
+ * @p_userptr : pointer to result userptr structure
+ *
+ * This function does the following:
+ * - Allocate userptr structure
+ * - Pin the given host memory using the userptr structure
+ * - Perform DMA mapping to have the DMA addresses of the pages
+ */
+static int get_userptr_from_host_va(struct hl_device *hdev,
+ struct hl_mem_in *args, struct hl_userptr **p_userptr)
+{
+ struct hl_userptr *userptr;
+ int rc;
+
+ userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
+ if (!userptr) {
+ rc = -ENOMEM;
+ goto userptr_err;
+ }
+
+ rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
+ args->map_host.mem_size, userptr);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to pin host memory\n");
+ goto pin_err;
+ }
+
+ rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents, DMA_BIDIRECTIONAL);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto dma_map_err;
+ }
+
+ userptr->dma_mapped = true;
+ userptr->dir = DMA_BIDIRECTIONAL;
+ userptr->vm_type = VM_TYPE_USERPTR;
+
+ *p_userptr = userptr;
+
+ return 0;
+
+dma_map_err:
+ hl_unpin_host_memory(hdev, userptr);
+pin_err:
+ kfree(userptr);
+userptr_err:
+
+ return rc;
+}
+
+/*
+ * free_userptr - free userptr structure
+ *
+ * @hdev : habanalabs device structure
+ * @userptr : userptr to free
+ *
+ * This function does the following:
+ * - Unpins the physical pages
+ * - Frees the userptr structure
+ */
+static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+{
+ hl_unpin_host_memory(hdev, userptr);
+ kfree(userptr);
+}
+
+/*
+ * dram_pg_pool_do_release - free DRAM pages pool
+ *
+ * @ref : pointer to reference object
+ *
+ * This function does the following:
+ * - Frees the idr structure of physical page handles
+ * - Frees the generic pool of DRAM physical pages
+ */
+static void dram_pg_pool_do_release(struct kref *ref)
+{
+ struct hl_vm *vm = container_of(ref, struct hl_vm,
+ dram_pg_pool_refcount);
+
+ /*
+ * free the idr here as only here we know for sure that there are no
+ * allocated physical pages and hence there are no handles in use
+ */
+ idr_destroy(&vm->phys_pg_pack_handles);
+ gen_pool_destroy(vm->dram_pg_pool);
+}
+
+/*
+ * free_phys_pg_pack - free physical page pack
+ *
+ * @hdev : habanalabs device structure
+ * @phys_pg_pack : physical page pack to free
+ *
+ * This function does the following:
+ * - For DRAM memory only, iterate over the pack and free each physical block
+ * structure by returning it to the general pool
+ * - Free the hl_vm_phys_pg_pack structure
+ */
+static void free_phys_pg_pack(struct hl_device *hdev,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_vm *vm = &hdev->vm;
+ int i;
+
+ if (!phys_pg_pack->created_from_userptr) {
+ if (phys_pg_pack->contiguous) {
+ gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
+ phys_pg_pack->total_size);
+
+ for (i = 0; i < phys_pg_pack->npages ; i++)
+ kref_put(&vm->dram_pg_pool_refcount,
+ dram_pg_pool_do_release);
+ } else {
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ gen_pool_free(vm->dram_pg_pool,
+ phys_pg_pack->pages[i],
+ phys_pg_pack->page_size);
+ kref_put(&vm->dram_pg_pool_refcount,
+ dram_pg_pool_do_release);
+ }
+ }
+ }
+
+ kfree(phys_pg_pack->pages);
+ kfree(phys_pg_pack);
+}
+
+/*
+ * free_device_memory - free device memory
+ *
+ * @ctx : current context
+ * @handle : handle of the memory chunk to free
+ *
+ * This function does the following:
+ * - Free the device memory related to the given handle
+ */
+static int free_device_memory(struct hl_ctx *ctx, u32 handle)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ if (phys_pg_pack) {
+ if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
+ dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
+ handle);
+ spin_unlock(&vm->idr_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * must remove the handle from the idr before freeing the
+ * physical pages, as the pool refcount dropping to zero is also
+ * what triggers the idr destroy
+ */
+ idr_remove(&vm->phys_pg_pack_handles, handle);
+ spin_unlock(&vm->idr_lock);
+
+ atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
+ atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
+
+ free_phys_pg_pack(hdev, phys_pg_pack);
+ } else {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev,
+ "free device memory failed, no match for handle %u\n",
+ handle);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * clear_va_list_locked - free virtual addresses list
+ *
+ * @hdev : habanalabs device structure
+ * @va_list : list of virtual addresses to free
+ *
+ * This function does the following:
+ * - Iterate over the list and free each virtual addresses block
+ *
+ * This function should be called only when va_list lock is taken
+ */
+static void clear_va_list_locked(struct hl_device *hdev,
+ struct list_head *va_list)
+{
+ struct hl_vm_va_block *va_block, *tmp;
+
+ list_for_each_entry_safe(va_block, tmp, va_list, node) {
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+}
+
+/*
+ * print_va_list_locked - print virtual addresses list
+ *
+ * @hdev : habanalabs device structure
+ * @va_list : list of virtual addresses to print
+ *
+ * This function does the following:
+ * - Iterate over the list and print each virtual addresses block
+ *
+ * This function should be called only when va_list lock is taken
+ */
+static void print_va_list_locked(struct hl_device *hdev,
+ struct list_head *va_list)
+{
+#if HL_MMU_DEBUG
+ struct hl_vm_va_block *va_block;
+
+ dev_dbg(hdev->dev, "print va list:\n");
+
+ list_for_each_entry(va_block, va_list, node)
+ dev_dbg(hdev->dev,
+ "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
+ va_block->start, va_block->end, va_block->size);
+#endif
+}
+
+/*
+ * merge_va_blocks_locked - merge a virtual block if possible
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_list : pointer to the virtual addresses block list
+ * @va_block : virtual block to merge with adjacent blocks
+ *
+ * This function does the following:
+ * - Merge the given block with the adjacent blocks if their virtual ranges
+ * create a contiguous virtual range
+ *
+ * This function should be called only when the va_list lock is taken
+ */
+static void merge_va_blocks_locked(struct hl_device *hdev,
+ struct list_head *va_list, struct hl_vm_va_block *va_block)
+{
+ struct hl_vm_va_block *prev, *next;
+
+ prev = list_prev_entry(va_block, node);
+ if (&prev->node != va_list && prev->end + 1 == va_block->start) {
+ prev->end = va_block->end;
+ prev->size = prev->end - prev->start;
+ list_del(&va_block->node);
+ kfree(va_block);
+ va_block = prev;
+ }
+
+ next = list_next_entry(va_block, node);
+ if (&next->node != va_list && va_block->end + 1 == next->start) {
+ next->start = va_block->start;
+ next->size = next->end - next->start;
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+}
+
+/*
+ * add_va_block_locked - add a virtual block to the virtual addresses list
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_list : pointer to the virtual addresses block list
+ * @start : start virtual address
+ * @end : end virtual address
+ *
+ * This function does the following:
+ * - Add the given block to the virtual blocks list and merge with other
+ * blocks if a contiguous virtual block can be created
+ *
+ * This function should be called only when the va_list lock is taken
+ */
+static int add_va_block_locked(struct hl_device *hdev,
+ struct list_head *va_list, u64 start, u64 end)
+{
+ struct hl_vm_va_block *va_block, *res = NULL;
+ u64 size = end - start;
+
+ print_va_list_locked(hdev, va_list);
+
+ list_for_each_entry(va_block, va_list, node) {
+ /* TODO: remove upon maturity */
+ if (hl_mem_area_crosses_range(start, size, va_block->start,
+ va_block->end)) {
+ dev_err(hdev->dev,
+ "block crossing ranges at start 0x%llx, end 0x%llx\n",
+ va_block->start, va_block->end);
+ return -EINVAL;
+ }
+
+ if (va_block->end < start)
+ res = va_block;
+ }
+
+ va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
+ if (!va_block)
+ return -ENOMEM;
+
+ va_block->start = start;
+ va_block->end = end;
+ va_block->size = size;
+
+ if (!res)
+ list_add(&va_block->node, va_list);
+ else
+ list_add(&va_block->node, &res->node);
+
+ merge_va_blocks_locked(hdev, va_list, va_block);
+
+ print_va_list_locked(hdev, va_list);
+
+ return 0;
+}
+
+/*
+ * add_va_block - wrapper for add_va_block_locked
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the virtual addresses range
+ * @start : start virtual address
+ * @end : end virtual address
+ *
+ * This function does the following:
+ * - Takes the list lock and calls add_va_block_locked
+ */
+static inline int add_va_block(struct hl_device *hdev,
+ struct hl_va_range *va_range, u64 start, u64 end)
+{
+ int rc;
+
+ mutex_lock(&va_range->lock);
+ rc = add_va_block_locked(hdev, &va_range->list, start, end);
+ mutex_unlock(&va_range->lock);
+
+ return rc;
+}
+
+/*
+ * get_va_block - get a virtual block with the requested size
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the virtual addresses range
+ * @size : requested block size
+ * @hint_addr : address hint requested by the user
+ * @is_userptr : true for host (userptr) memory, false for DRAM memory
+ *
+ * This function does the following:
+ * - Iterate over the virtual block list to find a suitable virtual block for the
+ * requested size
+ * - Reserve the requested block and update the list
+ * - Return the start address of the virtual block
+ */
+static u64 get_va_block(struct hl_device *hdev,
+ struct hl_va_range *va_range, u32 size, u64 hint_addr,
+ bool is_userptr)
+{
+ struct hl_vm_va_block *va_block, *new_va_block = NULL;
+ u64 valid_start, valid_size, prev_start, prev_end, page_mask,
+ res_valid_start = 0, res_valid_size = 0;
+ u32 page_size;
+ bool add_prev = false;
+
+ if (is_userptr) {
+ /*
+ * We cannot know if the user allocated memory with huge pages
+ * or not, hence we continue with the biggest possible
+ * granularity.
+ */
+ page_size = PAGE_SIZE_2MB;
+ page_mask = PAGE_MASK_2MB;
+ } else {
+ page_size = hdev->asic_prop.dram_page_size;
+ page_mask = ~((u64)page_size - 1);
+ }
+
+ mutex_lock(&va_range->lock);
+
+ print_va_list_locked(hdev, &va_range->list);
+
+ list_for_each_entry(va_block, &va_range->list, node) {
+ /* calc the first possible aligned addr */
+ valid_start = va_block->start;
+
+ if (valid_start & (page_size - 1)) {
+ valid_start &= page_mask;
+ valid_start += page_size;
+ if (valid_start > va_block->end)
+ continue;
+ }
+
+ valid_size = va_block->end - valid_start;
+
+ if (valid_size >= size &&
+ (!new_va_block || valid_size < res_valid_size)) {
+
+ new_va_block = va_block;
+ res_valid_start = valid_start;
+ res_valid_size = valid_size;
+ }
+
+ if (hint_addr && hint_addr >= valid_start &&
+ ((hint_addr + size) <= va_block->end)) {
+ new_va_block = va_block;
+ res_valid_start = hint_addr;
+ res_valid_size = valid_size;
+ break;
+ }
+ }
+
+ if (!new_va_block) {
+ dev_err(hdev->dev, "no available va block for size %u\n", size);
+ goto out;
+ }
+
+ if (res_valid_start > new_va_block->start) {
+ prev_start = new_va_block->start;
+ prev_end = res_valid_start - 1;
+
+ new_va_block->start = res_valid_start;
+ new_va_block->size = res_valid_size;
+
+ add_prev = true;
+ }
+
+ if (new_va_block->size > size) {
+ new_va_block->start += size;
+ new_va_block->size = new_va_block->end - new_va_block->start;
+ } else {
+ list_del(&new_va_block->node);
+ kfree(new_va_block);
+ }
+
+ if (add_prev)
+ add_va_block_locked(hdev, &va_range->list, prev_start,
+ prev_end);
+
+ print_va_list_locked(hdev, &va_range->list);
+out:
+ mutex_unlock(&va_range->lock);
+
+ return res_valid_start;
+}
+
+/*
+ * get_sg_info - get the number of pages and the DMA address of an SG entry
+ *
+ * @sg : the SG list entry
+ * @dma_addr : pointer to DMA address to return
+ *
+ * Calculate the number of consecutive pages described by the SG entry: take
+ * the offset of the address within the first page, add the length and round
+ * the result up to a whole number of pages.
+ */
+static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
+{
+ *dma_addr = sg_dma_address(sg);
+
+ return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+}
+
+/*
+ * init_phys_pg_pack_from_userptr - initialize physical page pack from host
+ * memory
+ *
+ * @ctx : current context
+ * @userptr : userptr to initialize from
+ * @pphys_pg_pack : res pointer
+ *
+ * This function does the following:
+ * - Create a physical page pack from the physical pages that were already
+ * pinned for the given userptr
+ * - Check whether the pages can be mapped using huge (2MB) pages
+ */
+static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
+ struct hl_userptr *userptr,
+ struct hl_vm_phys_pg_pack **pphys_pg_pack)
+{
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct scatterlist *sg;
+ dma_addr_t dma_addr;
+ u64 page_mask;
+ u32 npages, total_npages, page_size = PAGE_SIZE;
+ bool first = true, is_huge_page_opt = true;
+ int rc, i, j;
+
+ phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
+ if (!phys_pg_pack)
+ return -ENOMEM;
+
+ phys_pg_pack->vm_type = userptr->vm_type;
+ phys_pg_pack->created_from_userptr = true;
+ phys_pg_pack->asid = ctx->asid;
+ atomic_set(&phys_pg_pack->mapping_cnt, 1);
+
+ /* Only if all dma_addrs are aligned to 2MB and their
+	 * sizes are at least 2MB can we use huge page mapping.
+ * We limit the 2MB optimization to this condition,
+ * since later on we acquire the related VA range as one
+ * consecutive block.
+ */
+ total_npages = 0;
+ for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ npages = get_sg_info(sg, &dma_addr);
+
+ total_npages += npages;
+
+ if (first) {
+ first = false;
+ dma_addr &= PAGE_MASK_2MB;
+ }
+
+ if ((npages % PGS_IN_2MB_PAGE) ||
+ (dma_addr & (PAGE_SIZE_2MB - 1)))
+ is_huge_page_opt = false;
+ }
+
+ if (is_huge_page_opt) {
+ page_size = PAGE_SIZE_2MB;
+ total_npages /= PGS_IN_2MB_PAGE;
+ }
+
+ page_mask = ~(((u64) page_size) - 1);
+
+ phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+ if (!phys_pg_pack->pages) {
+ rc = -ENOMEM;
+ goto page_pack_arr_mem_err;
+ }
+
+ phys_pg_pack->npages = total_npages;
+ phys_pg_pack->page_size = page_size;
+ phys_pg_pack->total_size = total_npages * page_size;
+
+ j = 0;
+ first = true;
+ for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ npages = get_sg_info(sg, &dma_addr);
+
+ /* align down to physical page size and save the offset */
+ if (first) {
+ first = false;
+ phys_pg_pack->offset = dma_addr & (page_size - 1);
+ dma_addr &= page_mask;
+ }
+
+ while (npages) {
+ phys_pg_pack->pages[j++] = dma_addr;
+ dma_addr += page_size;
+
+ if (is_huge_page_opt)
+ npages -= PGS_IN_2MB_PAGE;
+ else
+ npages--;
+ }
+ }
+
+ *pphys_pg_pack = phys_pg_pack;
+
+ return 0;
+
+page_pack_arr_mem_err:
+ kfree(phys_pg_pack);
+
+ return rc;
+}
+
+/*
+ * map_phys_page_pack - maps the physical page pack
+ *
+ * @ctx : current context
+ * @vaddr : start address of the virtual area to map from
+ * @phys_pg_pack : the pack of physical pages to map to
+ *
+ * This function does the following:
+ * - Maps each chunk of virtual memory to a matching physical chunk
+ * - Rolls back the partial mapping if any chunk fails to map
+ * - Returns 0 on success, error code otherwise.
+ */
+static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 next_vaddr = vaddr, paddr;
+ u32 page_size = phys_pg_pack->page_size;
+ int i, rc = 0, mapped_pg_cnt = 0;
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ paddr = phys_pg_pack->pages[i];
+
+ /* For accessing the host we need to turn on bit 39 */
+ if (phys_pg_pack->created_from_userptr)
+ paddr += hdev->asic_prop.host_phys_base_address;
+
+ rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
+ if (rc) {
+ dev_err(hdev->dev,
+ "map failed for handle %u, npages: %d, mapped: %d",
+ phys_pg_pack->handle, phys_pg_pack->npages,
+ mapped_pg_cnt);
+ goto err;
+ }
+
+ mapped_pg_cnt++;
+ next_vaddr += page_size;
+ }
+
+ return 0;
+
+err:
+ next_vaddr = vaddr;
+ for (i = 0 ; i < mapped_pg_cnt ; i++) {
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
+ phys_pg_pack->handle, next_vaddr,
+ phys_pg_pack->pages[i], page_size);
+
+ next_vaddr += page_size;
+ }
+
+ return rc;
+}
+
+static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
+ u64 *paddr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ u32 handle;
+
+ handle = lower_32_bits(args->map_device.handle);
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev, "no match for handle %u\n", handle);
+ return -EINVAL;
+ }
+
+ *paddr = phys_pg_pack->pages[0];
+
+ spin_unlock(&vm->idr_lock);
+
+ return 0;
+}
+
+/*
+ * map_device_va - map the given memory
+ *
+ * @ctx : current context
+ * @args : host parameters with handle/host virtual address
+ * @device_addr : pointer to result device virtual address
+ *
+ * This function does the following:
+ * - If given a physical device memory handle, map to a device virtual block
+ * and return the start address of this block
+ * - If given a host virtual address and size, find the related physical pages,
+ * map a device virtual block to these pages and return the start address of
+ * this block
+ */
+static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
+ u64 *device_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_userptr *userptr = NULL;
+ struct hl_vm_hash_node *hnode;
+ enum vm_type_t *vm_type;
+ u64 ret_vaddr, hint_addr;
+ u32 handle = 0;
+ int rc;
+ bool is_userptr = args->flags & HL_MEM_USERPTR;
+
+ /* Assume failure */
+ *device_addr = 0;
+
+ if (is_userptr) {
+ rc = get_userptr_from_host_va(hdev, args, &userptr);
+ if (rc) {
+ dev_err(hdev->dev, "failed to get userptr from va\n");
+ return rc;
+ }
+
+ rc = init_phys_pg_pack_from_userptr(ctx, userptr,
+ &phys_pg_pack);
+ if (rc) {
+ dev_err(hdev->dev,
+ "unable to init page pack for vaddr 0x%llx\n",
+ args->map_host.host_virt_addr);
+ goto init_page_pack_err;
+ }
+
+ vm_type = (enum vm_type_t *) userptr;
+ hint_addr = args->map_host.hint_addr;
+ } else {
+ handle = lower_32_bits(args->map_device.handle);
+
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev,
+ "no match for handle %u\n", handle);
+ return -EINVAL;
+ }
+
+ /* increment now to avoid freeing device memory while mapping */
+ atomic_inc(&phys_pg_pack->mapping_cnt);
+
+ spin_unlock(&vm->idr_lock);
+
+ vm_type = (enum vm_type_t *) phys_pg_pack;
+
+ hint_addr = args->map_device.hint_addr;
+ }
+
+ /*
+ * relevant for mapping device physical memory only, as host memory is
+ * implicitly shared
+ */
+ if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
+ phys_pg_pack->asid != ctx->asid) {
+ dev_err(hdev->dev,
+ "Failed to map memory, handle %u is not shared\n",
+ handle);
+ rc = -EPERM;
+ goto shared_err;
+ }
+
+ hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
+ if (!hnode) {
+ rc = -ENOMEM;
+ goto hnode_err;
+ }
+
+ ret_vaddr = get_va_block(hdev,
+ is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
+ phys_pg_pack->total_size, hint_addr, is_userptr);
+ if (!ret_vaddr) {
+ dev_err(hdev->dev, "no available va block for handle %u\n",
+ handle);
+ rc = -ENOMEM;
+ goto va_block_err;
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+
+ rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
+ if (rc) {
+ mutex_unlock(&ctx->mmu_lock);
+ dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
+ handle);
+ goto map_err;
+ }
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ ret_vaddr += phys_pg_pack->offset;
+
+ hnode->ptr = vm_type;
+ hnode->vaddr = ret_vaddr;
+
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ *device_addr = ret_vaddr;
+
+ if (is_userptr)
+ free_phys_pg_pack(hdev, phys_pg_pack);
+
+ return 0;
+
+map_err:
+ if (add_va_block(hdev,
+ is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
+ ret_vaddr,
+ ret_vaddr + phys_pg_pack->total_size - 1))
+ dev_warn(hdev->dev,
+ "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
+ handle, ret_vaddr);
+
+va_block_err:
+ kfree(hnode);
+hnode_err:
+shared_err:
+ atomic_dec(&phys_pg_pack->mapping_cnt);
+ if (is_userptr)
+ free_phys_pg_pack(hdev, phys_pg_pack);
+init_page_pack_err:
+ if (is_userptr)
+ free_userptr(hdev, userptr);
+
+ return rc;
+}
+
+/*
+ * unmap_device_va - unmap the given device virtual address
+ *
+ * @ctx : current context
+ * @vaddr : device virtual address to unmap
+ *
+ * This function does the following:
+ * - Unmap the physical pages related to the given virtual address
+ * - Return the device virtual block to the virtual block list
+ */
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ struct hl_vm_hash_node *hnode = NULL;
+ struct hl_userptr *userptr = NULL;
+ enum vm_type_t *vm_type;
+ u64 next_vaddr;
+ u32 page_size;
+ bool is_userptr;
+ int i, rc;
+
+	/* protect against double unmap of the same address */
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
+ if (vaddr == hnode->vaddr)
+ break;
+
+ if (!hnode) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_err(hdev->dev,
+ "unmap failed, no mem hnode for vaddr 0x%llx\n",
+ vaddr);
+ return -EINVAL;
+ }
+
+ hash_del(&hnode->node);
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ vm_type = hnode->ptr;
+
+ if (*vm_type == VM_TYPE_USERPTR) {
+ is_userptr = true;
+ userptr = hnode->ptr;
+ rc = init_phys_pg_pack_from_userptr(ctx, userptr,
+ &phys_pg_pack);
+ if (rc) {
+ dev_err(hdev->dev,
+ "unable to init page pack for vaddr 0x%llx\n",
+ vaddr);
+ goto vm_type_err;
+ }
+ } else if (*vm_type == VM_TYPE_PHYS_PACK) {
+ is_userptr = false;
+ phys_pg_pack = hnode->ptr;
+ } else {
+ dev_warn(hdev->dev,
+ "unmap failed, unknown vm desc for vaddr 0x%llx\n",
+ vaddr);
+ rc = -EFAULT;
+ goto vm_type_err;
+ }
+
+ if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
+ dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
+ rc = -EINVAL;
+ goto mapping_cnt_err;
+ }
+
+ page_size = phys_pg_pack->page_size;
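+	/*
+	 * The map flow added the in-page offset to the returned address, so
+	 * align it back down to the page boundary before unmapping.
+	 */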
+ vaddr &= ~(((u64) page_size) - 1);
+
+ next_vaddr = vaddr;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ if (add_va_block(hdev,
+ is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
+ vaddr,
+ vaddr + phys_pg_pack->total_size - 1))
+ dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
+ vaddr);
+
+ atomic_dec(&phys_pg_pack->mapping_cnt);
+ kfree(hnode);
+
+ if (is_userptr) {
+ free_phys_pg_pack(hdev, phys_pg_pack);
+ free_userptr(hdev, userptr);
+ }
+
+ return 0;
+
+mapping_cnt_err:
+ if (is_userptr)
+ free_phys_pg_pack(hdev, phys_pg_pack);
+vm_type_err:
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_add(ctx->mem_hash, &hnode->node, vaddr);
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ return rc;
+}
+
+int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ union hl_mem_args *args = data;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
+ u64 device_addr = 0;
+ u32 handle = 0;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is disabled or in reset. Can't execute memory IOCTL\n");
+ return -EBUSY;
+ }
+
+ if (hdev->mmu_enable) {
+ switch (args->in.op) {
+ case HL_MEM_OP_ALLOC:
+ if (!hdev->dram_supports_virtual_memory) {
+ dev_err(hdev->dev,
+ "DRAM alloc is not supported\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ if (args->in.alloc.mem_size == 0) {
+ dev_err(hdev->dev,
+ "alloc size must be larger than 0\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = alloc_device_memory(ctx, &args->in, &handle);
+
+ memset(args, 0, sizeof(*args));
+ args->out.handle = (__u64) handle;
+ break;
+
+ case HL_MEM_OP_FREE:
+ if (!hdev->dram_supports_virtual_memory) {
+ dev_err(hdev->dev,
+ "DRAM free is not supported\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = free_device_memory(ctx, args->in.free.handle);
+ break;
+
+ case HL_MEM_OP_MAP:
+ rc = map_device_va(ctx, &args->in, &device_addr);
+
+ memset(args, 0, sizeof(*args));
+ args->out.device_virt_addr = device_addr;
+ break;
+
+ case HL_MEM_OP_UNMAP:
+ rc = unmap_device_va(ctx,
+ args->in.unmap.device_virt_addr);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+ rc = -ENOTTY;
+ break;
+ }
+ } else {
+ switch (args->in.op) {
+ case HL_MEM_OP_ALLOC:
+ if (args->in.alloc.mem_size == 0) {
+ dev_err(hdev->dev,
+ "alloc size must be larger than 0\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Force contiguous as there are no real MMU
+ * translations to overcome physical memory gaps
+ */
+ args->in.flags |= HL_MEM_CONTIGUOUS;
+ rc = alloc_device_memory(ctx, &args->in, &handle);
+
+ memset(args, 0, sizeof(*args));
+ args->out.handle = (__u64) handle;
+ break;
+
+ case HL_MEM_OP_FREE:
+ rc = free_device_memory(ctx, args->in.free.handle);
+ break;
+
+ case HL_MEM_OP_MAP:
+ if (args->in.flags & HL_MEM_USERPTR) {
+ device_addr = args->in.map_host.host_virt_addr;
+ rc = 0;
+ } else {
+ rc = get_paddr_from_handle(ctx, &args->in,
+ &device_addr);
+ }
+
+ memset(args, 0, sizeof(*args));
+ args->out.device_virt_addr = device_addr;
+ break;
+
+ case HL_MEM_OP_UNMAP:
+ rc = 0;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+ rc = -ENOTTY;
+ break;
+ }
+ }
+
+out:
+ return rc;
+}
+
+/*
+ * hl_pin_host_memory - pins a chunk of host memory
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @addr : the user-space virtual address of the memory area
+ * @size : the size of the memory area
+ * @userptr : pointer to hl_userptr structure
+ *
+ * This function does the following:
+ * - Pins the physical pages
+ * - Creates an SG list from those pages
+ */
+int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr *userptr)
+{
+ u64 start, end;
+ u32 npages, offset;
+ int rc;
+
+ if (!size) {
+ dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
+ return -EINVAL;
+ }
+
+ if (!access_ok((void __user *) (uintptr_t) addr, size)) {
+ dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
+ return -EFAULT;
+ }
+
+ /*
+ * If the combination of the address and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+ if (((addr + size) < addr) ||
+ PAGE_ALIGN(addr + size) < (addr + size)) {
+ dev_err(hdev->dev,
+ "user pointer 0x%llx + %llu causes integer overflow\n",
+ addr, size);
+ return -EINVAL;
+ }
+
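+	/*
+	 * For example, assuming 4KB pages: addr = 0x1000800 and size = 0x2000
+	 * give start = 0x1000000, offset = 0x800, end = 0x1003000 and
+	 * npages = 3.
+	 */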
+ start = addr & PAGE_MASK;
+ offset = addr & ~PAGE_MASK;
+ end = PAGE_ALIGN(addr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+
+ userptr->size = size;
+ userptr->addr = addr;
+ userptr->dma_mapped = false;
+ INIT_LIST_HEAD(&userptr->job_node);
+
+ userptr->vec = frame_vector_create(npages);
+ if (!userptr->vec) {
+ dev_err(hdev->dev, "Failed to create frame vector\n");
+ return -ENOMEM;
+ }
+
+ rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ userptr->vec);
+
+ if (rc != npages) {
+ dev_err(hdev->dev,
+ "Failed to map host memory, user ptr probably wrong\n");
+ if (rc < 0)
+ goto destroy_framevec;
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ if (frame_vector_to_pages(userptr->vec) < 0) {
+ dev_err(hdev->dev,
+ "Failed to translate frame vector to pages\n");
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+ if (!userptr->sgt) {
+ rc = -ENOMEM;
+ goto put_framevec;
+ }
+
+ rc = sg_alloc_table_from_pages(userptr->sgt,
+ frame_vector_pages(userptr->vec),
+ npages, offset, size, GFP_ATOMIC);
+ if (rc < 0) {
+ dev_err(hdev->dev, "failed to create SG table from pages\n");
+ goto free_sgt;
+ }
+
+ hl_debugfs_add_userptr(hdev, userptr);
+
+ return 0;
+
+free_sgt:
+ kfree(userptr->sgt);
+put_framevec:
+ put_vaddr_frames(userptr->vec);
+destroy_framevec:
+ frame_vector_destroy(userptr->vec);
+ return rc;
+}
+
+/*
+ * hl_unpin_host_memory - unpins a chunk of host memory
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @userptr : pointer to hl_userptr structure
+ *
+ * This function does the following:
+ * - Unpins the physical pages related to the host memory
+ * - Frees the SG list
+ */
+int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
+{
+ struct page **pages;
+
+ hl_debugfs_remove_userptr(hdev, userptr);
+
+ if (userptr->dma_mapped)
+ hdev->asic_funcs->hl_dma_unmap_sg(hdev,
+ userptr->sgt->sgl,
+ userptr->sgt->nents,
+ userptr->dir);
+
+ pages = frame_vector_pages(userptr->vec);
+ if (!IS_ERR(pages)) {
+ int i;
+
+ for (i = 0; i < frame_vector_count(userptr->vec); i++)
+ set_page_dirty_lock(pages[i]);
+ }
+ put_vaddr_frames(userptr->vec);
+ frame_vector_destroy(userptr->vec);
+
+ list_del(&userptr->job_node);
+
+ sg_free_table(userptr->sgt);
+ kfree(userptr->sgt);
+
+ return 0;
+}
+
+/*
+ * hl_userptr_delete_list - clear userptr list
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @userptr_list : pointer to the list to clear
+ *
+ * This function does the following:
+ * - Iterates over the list, unpins the host memory and frees each userptr
+ * structure.
+ */
+void hl_userptr_delete_list(struct hl_device *hdev,
+ struct list_head *userptr_list)
+{
+ struct hl_userptr *userptr, *tmp;
+
+ list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
+ hl_unpin_host_memory(hdev, userptr);
+ kfree(userptr);
+ }
+
+ INIT_LIST_HEAD(userptr_list);
+}
+
+/*
+ * hl_userptr_is_pinned - returns whether the given userptr is pinned
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @addr : user address to check
+ * @size : size of the memory area to check
+ * @userptr_list : pointer to the list to search in
+ * @userptr : on success, set to the matching userptr
+ *
+ * This function does the following:
+ * - Iterates over the list and checks if an entry with the given address and
+ * size exists in it, meaning it is pinned. If so, returns true, otherwise
+ * returns false.
+ */
+bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
+ u32 size, struct list_head *userptr_list,
+ struct hl_userptr **userptr)
+{
+ list_for_each_entry((*userptr), userptr_list, job_node) {
+ if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * hl_va_range_init - initialize virtual addresses range
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the range to initialize
+ * @start : range start address
+ * @end : range end address
+ *
+ * This function does the following:
+ * - Initializes the virtual addresses list of the given range with the given
+ * addresses.
+ */
+static int hl_va_range_init(struct hl_device *hdev,
+ struct hl_va_range *va_range, u64 start, u64 end)
+{
+ int rc;
+
+ INIT_LIST_HEAD(&va_range->list);
+
+ /* PAGE_SIZE alignment */
+
+ if (start & (PAGE_SIZE - 1)) {
+ start &= PAGE_MASK;
+ start += PAGE_SIZE;
+ }
+
+ if (end & (PAGE_SIZE - 1))
+ end &= PAGE_MASK;
+
+ if (start >= end) {
+ dev_err(hdev->dev, "too small vm range for va list\n");
+ return -EFAULT;
+ }
+
+ rc = add_va_block(hdev, va_range, start, end);
+
+ if (rc) {
+ dev_err(hdev->dev, "Failed to init host va list\n");
+ return rc;
+ }
+
+ va_range->start_addr = start;
+ va_range->end_addr = end;
+
+ return 0;
+}
+
+/*
+ * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
+ *
+ * @ctx : pointer to the habanalabs context structure
+ * @host_range_start : host virtual addresses range start
+ * @host_range_end : host virtual addresses range end
+ * @dram_range_start : dram virtual addresses range start
+ * @dram_range_end : dram virtual addresses range end
+ *
+ * This function initializes the following:
+ * - MMU for context
+ * - Virtual address to area descriptor hashtable
+ * - Virtual block list of available virtual memory
+ */
+static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
+ u64 host_range_end, u64 dram_range_start,
+ u64 dram_range_end)
+{
+ struct hl_device *hdev = ctx->hdev;
+ int rc;
+
+ rc = hl_mmu_ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
+ return rc;
+ }
+
+ mutex_init(&ctx->mem_hash_lock);
+ hash_init(ctx->mem_hash);
+
+ mutex_init(&ctx->host_va_range.lock);
+
+ rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
+ host_range_end);
+ if (rc) {
+ dev_err(hdev->dev, "failed to init host vm range\n");
+ goto host_vm_err;
+ }
+
+ mutex_init(&ctx->dram_va_range.lock);
+
+ rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
+ dram_range_end);
+ if (rc) {
+ dev_err(hdev->dev, "failed to init dram vm range\n");
+ goto dram_vm_err;
+ }
+
+ hl_debugfs_add_ctx_mem_hash(hdev, ctx);
+
+ return 0;
+
+dram_vm_err:
+ mutex_destroy(&ctx->dram_va_range.lock);
+
+ mutex_lock(&ctx->host_va_range.lock);
+ clear_va_list_locked(hdev, &ctx->host_va_range.list);
+ mutex_unlock(&ctx->host_va_range.lock);
+host_vm_err:
+ mutex_destroy(&ctx->host_va_range.lock);
+ mutex_destroy(&ctx->mem_hash_lock);
+ hl_mmu_ctx_fini(ctx);
+
+ return rc;
+}
+
+int hl_vm_ctx_init(struct hl_ctx *ctx)
+{
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ u64 host_range_start, host_range_end, dram_range_start,
+ dram_range_end;
+
+ atomic64_set(&ctx->dram_phys_mem, 0);
+
+ /*
+ * - If MMU is enabled, init the ranges as usual.
+ * - If MMU is disabled, in case of host mapping, the returned address
+ * is the given one.
+ * In case of DRAM mapping, the returned address is the physical
+ * address of the memory related to the given handle.
+ */
+ if (ctx->hdev->mmu_enable) {
+ dram_range_start = prop->va_space_dram_start_address;
+ dram_range_end = prop->va_space_dram_end_address;
+ host_range_start = prop->va_space_host_start_address;
+ host_range_end = prop->va_space_host_end_address;
+ } else {
+ dram_range_start = prop->dram_user_base_address;
+ dram_range_end = prop->dram_end_address;
+ host_range_start = prop->dram_user_base_address;
+ host_range_end = prop->dram_end_address;
+ }
+
+ return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
+ dram_range_start, dram_range_end);
+}
+
+/*
+ * hl_va_range_fini - clear a virtual addresses range
+ *
+ * @hdev : pointer to the habanalabs structure
+ * va_range : pointer to virtual addresses range
+ *
+ * This function initializes the following:
+ * - Checks that the given range contains the whole initial range
+ * - Frees the virtual addresses block list and its lock
+ */
+static void hl_va_range_fini(struct hl_device *hdev,
+ struct hl_va_range *va_range)
+{
+ struct hl_vm_va_block *va_block;
+
+ if (list_empty(&va_range->list)) {
+ dev_warn(hdev->dev,
+ "va list should not be empty on cleanup!\n");
+ goto out;
+ }
+
+ if (!list_is_singular(&va_range->list)) {
+ dev_warn(hdev->dev,
+ "va list should not contain multiple blocks on cleanup!\n");
+ goto free_va_list;
+ }
+
+ va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
+
+ if (va_block->start != va_range->start_addr ||
+ va_block->end != va_range->end_addr) {
+ dev_warn(hdev->dev,
+ "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
+ va_block->start, va_block->end);
+ goto free_va_list;
+ }
+
+free_va_list:
+ mutex_lock(&va_range->lock);
+ clear_va_list_locked(hdev, &va_range->list);
+ mutex_unlock(&va_range->lock);
+
+out:
+ mutex_destroy(&va_range->lock);
+}
+
+/*
+ * hl_vm_ctx_fini - virtual memory teardown of context
+ *
+ * @ctx : pointer to the habanalabs context structure
+ *
+ * This function performs teardown of the following:
+ * - Virtual block list of available virtual memory
+ * - Virtual address to area descriptor hashtable
+ * - MMU for context
+ *
+ * In addition this function does the following:
+ * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
+ * hashtable should be empty as no valid mappings should exist at this
+ * point.
+ * - Frees any existing physical page list from the idr which relates to the
+ * current context asid.
+ * - This function checks the virtual block list for correctness. At this point
+ * the list should contain one element which describes the whole virtual
+ * memory range of the context. Otherwise, a warning is printed.
+ */
+void hl_vm_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_list;
+ struct hl_vm_hash_node *hnode;
+ struct hlist_node *tmp_node;
+ int i;
+
+ hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
+
+ if (!hash_empty(ctx->mem_hash))
+ dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
+
+ hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
+ dev_dbg(hdev->dev,
+ "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
+ hnode->vaddr, ctx->asid);
+ unmap_device_va(ctx, hnode->vaddr);
+ }
+
+ spin_lock(&vm->idr_lock);
+ idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
+ if (phys_pg_list->asid == ctx->asid) {
+ dev_dbg(hdev->dev,
+ "page list 0x%p of asid %d is still alive\n",
+ phys_pg_list, ctx->asid);
+ free_phys_pg_pack(hdev, phys_pg_list);
+ idr_remove(&vm->phys_pg_pack_handles, i);
+ }
+ spin_unlock(&vm->idr_lock);
+
+ hl_va_range_fini(hdev, &ctx->dram_va_range);
+ hl_va_range_fini(hdev, &ctx->host_va_range);
+
+ mutex_destroy(&ctx->mem_hash_lock);
+ hl_mmu_ctx_fini(ctx);
+}
+
+/*
+ * hl_vm_init - initialize virtual memory module
+ *
+ * @hdev : pointer to the habanalabs device structure
+ *
+ * This function initializes the following:
+ * - MMU module
+ * - DRAM physical pages pool of 2MB
+ * - Idr for device memory allocation handles
+ */
+int hl_vm_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_vm *vm = &hdev->vm;
+ int rc;
+
+ rc = hl_mmu_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to init MMU\n");
+ return rc;
+ }
+
+ vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
+ if (!vm->dram_pg_pool) {
+ dev_err(hdev->dev, "Failed to create dram page pool\n");
+ rc = -ENOMEM;
+ goto pool_create_err;
+ }
+
+ kref_init(&vm->dram_pg_pool_refcount);
+
+ rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
+ prop->dram_end_address - prop->dram_user_base_address,
+ -1);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to dram page pool %d\n", rc);
+ goto pool_add_err;
+ }
+
+ spin_lock_init(&vm->idr_lock);
+ idr_init(&vm->phys_pg_pack_handles);
+
+ atomic64_set(&hdev->dram_used_mem, 0);
+
+ vm->init_done = true;
+
+ return 0;
+
+pool_add_err:
+ gen_pool_destroy(vm->dram_pg_pool);
+pool_create_err:
+ hl_mmu_fini(hdev);
+
+ return rc;
+}
+
+/*
+ * hl_vm_fini - virtual memory module teardown
+ *
+ * @hdev : pointer to the habanalabs device structure
+ *
+ * This function performs teardown of the following:
+ * - Idr for device memory allocation handles
+ * - DRAM physical pages pool of 2MB
+ * - MMU module
+ */
+void hl_vm_fini(struct hl_device *hdev)
+{
+ struct hl_vm *vm = &hdev->vm;
+
+ if (!vm->init_done)
+ return;
+
+ /*
+	 * At this point all the contexts should be freed and hence no DRAM
+	 * memory should be in use, so the DRAM pool can be freed here.
+ */
+ if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
+ dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
+ __func__);
+
+ hl_mmu_fini(hdev);
+
+ vm->init_done = false;
+}
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
new file mode 100644
index 000000000000..2f2e99cb2743
--- /dev/null
+++ b/drivers/misc/habanalabs/mmu.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+
+static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
+{
+ struct pgt_info *pgt_info = NULL;
+
+ hash_for_each_possible(ctx->mmu_hash, pgt_info, node,
+ (unsigned long) addr)
+ if (addr == pgt_info->addr)
+ break;
+
+ return pgt_info;
+}
+
+static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+
+ gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+ hash_del(&pgt_info->node);
+
+ kfree(pgt_info);
+}
+
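+/*
+ * alloc_hop - allocate a hop page table from the device page table pool.
+ * Returns the address of the new hop, or ULLONG_MAX on failure.
+ */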
+static u64 alloc_hop(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ u64 addr;
+
+ pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+ if (!pgt_info)
+ return ULLONG_MAX;
+
+ addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
+ hdev->asic_prop.mmu_hop_table_size);
+ if (!addr) {
+ dev_err(hdev->dev, "failed to allocate page\n");
+ kfree(pgt_info);
+ return ULLONG_MAX;
+ }
+
+ pgt_info->addr = addr;
+ pgt_info->ctx = ctx;
+ pgt_info->num_of_ptes = 0;
+ hash_add(ctx->mmu_hash, &pgt_info->node, addr);
+
+ return addr;
+}
+
+static inline void clear_pte(struct hl_device *hdev, u64 pte_addr)
+{
+ /* clear the last and present bits */
+ hdev->asic_funcs->write_pte(hdev, pte_addr, 0);
+}
+
+static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ get_pgt_info(ctx, hop_addr)->num_of_ptes++;
+}
+
+/*
+ * put_pte - decrement the num of ptes and free the hop if possible
+ *
+ * @ctx: pointer to the context structure
+ * @hop_addr: addr of the hop
+ *
+ * This function returns the number of ptes left on this hop. If the number is
+ * 0, it means the hop was freed.
+ */
+static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+ int num_of_ptes_left;
+
+ pgt_info->num_of_ptes--;
+
+ /*
+ * Need to save the number of ptes left because free_hop might free
+ * the pgt_info
+ */
+ num_of_ptes_left = pgt_info->num_of_ptes;
+ if (!num_of_ptes_left)
+ free_hop(ctx, hop_addr);
+
+ return num_of_ptes_left;
+}
+
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
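+/*
+ * The PTE address inside a hop is the hop base address plus the PTE size
+ * times the index, where the index is extracted from the virtual address
+ * using the per-hop mask and shift.
+ */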
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & mask) >> shift);
+}
+
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
+}
+
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
+}
+
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
+}
+
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
+}
+
+static inline u64 get_next_hop_addr(u64 curr_pte)
+{
+ if (curr_pte & PAGE_PRESENT_MASK)
+ return curr_pte & PHYS_ADDR_MASK;
+ else
+ return ULLONG_MAX;
+}
+
+static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
+ bool *is_new_hop)
+{
+ u64 hop_addr = get_next_hop_addr(curr_pte);
+
+ if (hop_addr == ULLONG_MAX) {
+ hop_addr = alloc_hop(ctx);
+ *is_new_hop = (hop_addr != ULLONG_MAX);
+ }
+
+ return hop_addr;
+}
+
+/*
+ * hl_mmu_init - init the mmu module
+ *
+ * @hdev: pointer to the habanalabs device structure
+ *
+ * This function does the following:
+ * - Allocate max_asid zeroed hop0 pgts so no mapping is available
+ * - Enable mmu in hw
+ * - Invalidate the mmu cache
+ * - Create a pool of pages for pgts
+ * - Returns 0 on success
+ *
+ * This function depends on DMA QMAN to be working!
+ */
+int hl_mmu_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ /* MMU HW init was already done in device hw_init() */
+
+ mutex_init(&hdev->mmu_cache_lock);
+
+ hdev->mmu_pgt_pool =
+ gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
+
+ if (!hdev->mmu_pgt_pool) {
+ dev_err(hdev->dev, "Failed to create page gen pool\n");
+ rc = -ENOMEM;
+ goto err_pool_create;
+ }
+
+ rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
+ prop->mmu_hop0_tables_total_size,
+ prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
+ -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+ goto err_pool_add;
+ }
+
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(hdev->mmu_pgt_pool);
+err_pool_create:
+ mutex_destroy(&hdev->mmu_cache_lock);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_fini - release the mmu module.
+ *
+ * @hdev: pointer to the habanalabs device structure
+ *
+ * This function does the following:
+ * - Disable mmu in hw
+ * - free the pgts pool
+ *
+ * All ctxs should be freed before calling this func
+ */
+void hl_mmu_fini(struct hl_device *hdev)
+{
+ if (!hdev->mmu_enable)
+ return;
+
+ gen_pool_destroy(hdev->mmu_pgt_pool);
+
+ mutex_destroy(&hdev->mmu_cache_lock);
+
+ /* MMU HW fini will be done in device hw_fini() */
+}
+
+/**
+ * hl_mmu_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page table hops related to this context and an optional DRAM default page
+ * mapping.
+ * Return: 0 on success, non-zero otherwise.
+ */
+int hl_mmu_ctx_init(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
+ hop3_pte_addr, pte_val;
+ int rc, i, j, hop3_allocated = 0;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ mutex_init(&ctx->mmu_lock);
+ hash_init(ctx->mmu_hash);
+
+ if (!hdev->dram_supports_virtual_memory ||
+ !hdev->dram_default_page_mapping)
+ return 0;
+
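+	/*
+	 * Each hop3 table maps PTE_ENTRIES_IN_HOP DRAM pages, so the number of
+	 * hop3 tables is the default-mapping DRAM size divided by the page
+	 * size and then by the number of PTEs per hop.
+	 */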
+ num_of_hop3 = prop->dram_size_for_default_page_mapping;
+ do_div(num_of_hop3, prop->dram_page_size);
+ do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+ /* add hop1 and hop2 */
+ total_hops = num_of_hop3 + 2;
+
+ ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
+ if (!ctx->dram_default_hops) {
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+
+ hop1_addr = alloc_hop(ctx);
+ if (hop1_addr == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 1\n");
+ rc = -ENOMEM;
+ goto hop1_err;
+ }
+
+ ctx->dram_default_hops[total_hops - 1] = hop1_addr;
+
+ hop2_addr = alloc_hop(ctx);
+ if (hop2_addr == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 2\n");
+ rc = -ENOMEM;
+ goto hop2_err;
+ }
+
+ ctx->dram_default_hops[total_hops - 2] = hop2_addr;
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ ctx->dram_default_hops[i] = alloc_hop(ctx);
+ if (ctx->dram_default_hops[i] == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
+ rc = -ENOMEM;
+ goto hop3_err;
+ }
+ hop3_allocated++;
+ }
+
+ /* need only pte 0 in hops 0 and 1 */
+ pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val);
+
+ pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val);
+ get_pte(ctx, hop1_addr);
+
+ hop2_pte_addr = hop2_addr;
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val);
+ get_pte(ctx, hop2_addr);
+ hop2_pte_addr += HL_PTE_SIZE;
+ }
+
+ pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
+ LAST_MASK | PAGE_PRESENT_MASK;
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ hop3_pte_addr = ctx->dram_default_hops[i];
+ for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ hdev->asic_funcs->write_pte(hdev, hop3_pte_addr,
+ pte_val);
+ get_pte(ctx, ctx->dram_default_hops[i]);
+ hop3_pte_addr += HL_PTE_SIZE;
+ }
+ }
+
+ /* flush all writes to reach PCI */
+ mb();
+ hdev->asic_funcs->read_pte(hdev, hop2_addr);
+
+ return 0;
+
+hop3_err:
+ for (i = 0 ; i < hop3_allocated ; i++)
+ free_hop(ctx, ctx->dram_default_hops[i]);
+ free_hop(ctx, hop2_addr);
+hop2_err:
+ free_hop(ctx, hop1_addr);
+hop1_err:
+ kfree(ctx->dram_default_hops);
+alloc_err:
+ mutex_destroy(&ctx->mmu_lock);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ * - Free the mutex
+ * - Free DRAM default page mapping hops
+ */
+void hl_mmu_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pgt_info *pgt_info;
+ struct hlist_node *tmp;
+ u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
+ hop3_pte_addr;
+ int i, j;
+
+ if (!ctx->hdev->mmu_enable)
+ return;
+
+ if (hdev->dram_supports_virtual_memory &&
+ hdev->dram_default_page_mapping) {
+
+ num_of_hop3 = prop->dram_size_for_default_page_mapping;
+ do_div(num_of_hop3, prop->dram_page_size);
+ do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+ /* add hop1 and hop2 */
+ total_hops = num_of_hop3 + 2;
+ hop1_addr = ctx->dram_default_hops[total_hops - 1];
+ hop2_addr = ctx->dram_default_hops[total_hops - 2];
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ hop3_pte_addr = ctx->dram_default_hops[i];
+ for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ clear_pte(hdev, hop3_pte_addr);
+ put_pte(ctx, ctx->dram_default_hops[i]);
+ hop3_pte_addr += HL_PTE_SIZE;
+ }
+ }
+
+ hop2_pte_addr = hop2_addr;
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ clear_pte(hdev, hop2_pte_addr);
+ put_pte(ctx, hop2_addr);
+ hop2_pte_addr += HL_PTE_SIZE;
+ }
+
+ clear_pte(hdev, hop1_addr);
+ put_pte(ctx, hop1_addr);
+ clear_pte(hdev, get_hop0_addr(ctx));
+
+ kfree(ctx->dram_default_hops);
+
+ /* flush all writes to reach PCI */
+ mb();
+ hdev->asic_funcs->read_pte(hdev, hop2_addr);
+ }
+
+ if (!hash_empty(ctx->mmu_hash))
+ dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
+
+ hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) {
+ dev_err(hdev->dev,
+ "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+ pgt_info->addr, ctx->asid, pgt_info->num_of_ptes);
+ free_hop(ctx, pgt_info->addr);
+ }
+
+ mutex_destroy(&ctx->mmu_lock);
+}
+
+static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 hop0_addr = 0, hop0_pte_addr = 0,
+ hop1_addr = 0, hop1_pte_addr = 0,
+ hop2_addr = 0, hop2_pte_addr = 0,
+ hop3_addr = 0, hop3_pte_addr = 0,
+ hop4_addr = 0, hop4_pte_addr = 0,
+ curr_pte;
+ int clear_hop3 = 1;
+ bool is_dram_addr, is_huge, is_dram_default_page_mapping;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ hop0_addr = get_hop0_addr(ctx);
+
+ hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+
+ hop1_addr = get_next_hop_addr(curr_pte);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+
+ hop2_addr = get_next_hop_addr(curr_pte);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+
+ hop3_addr = get_next_hop_addr(curr_pte);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+ is_huge = curr_pte & LAST_MASK;
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev,
+ "DRAM unmapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ is_dram_default_page_mapping =
+ hdev->dram_default_page_mapping && is_dram_addr;
+
+ if (!is_huge) {
+ hop4_addr = get_next_hop_addr(curr_pte);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+
+ clear_hop3 = 0;
+ }
+
+ if (is_dram_default_page_mapping) {
+ u64 zero_pte = (prop->mmu_dram_default_page_addr &
+ PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ PAGE_PRESENT_MASK;
+ if (curr_pte == zero_pte) {
+ dev_err(hdev->dev,
+ "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
+ virt_addr);
+ goto not_mapped;
+ }
+
+ if (!(curr_pte & PAGE_PRESENT_MASK)) {
+ dev_err(hdev->dev,
+ "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
+ virt_addr);
+ goto not_mapped;
+ }
+
+ hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte);
+ put_pte(ctx, hop3_addr);
+ } else {
+ if (!(curr_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+
+ if (hop4_addr && !put_pte(ctx, hop4_addr))
+ clear_hop3 = 1;
+
+ if (!clear_hop3)
+ goto flush;
+ clear_pte(hdev, hop3_pte_addr);
+
+ if (put_pte(ctx, hop3_addr))
+ goto flush;
+ clear_pte(hdev, hop2_pte_addr);
+
+ if (put_pte(ctx, hop2_addr))
+ goto flush;
+ clear_pte(hdev, hop1_pte_addr);
+
+ if (put_pte(ctx, hop1_addr))
+ goto flush;
+ clear_pte(hdev, hop0_pte_addr);
+ }
+
+flush:
+ /* flush all writes from all cores to reach PCI */
+ mb();
+
+ hdev->asic_funcs->read_pte(hdev,
+ hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+
+ return 0;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+
+ return -EINVAL;
+}
+
+/*
+ * hl_mmu_unmap - unmaps a virtual addr
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to unmap
+ * @page_size: size of the page to unmap
+ *
+ * This function does the following:
+ * - Check that the virt addr is mapped
+ * - Unmap the virt addr and free pgts if possible
+ * - Returns 0 on success, -EINVAL if the given addr is not mapped
+ *
+ * Because this function changes the page tables in the device and because it
+ * changes the MMU hash, it must be protected by a lock.
+ * However, because it unmaps only a single page, the lock should be
+ * implemented at a higher level in order to protect the entire unmapping of
+ * the memory area
+ */
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 real_virt_addr;
+ u32 real_page_size, npages;
+ int i, rc;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ /*
+	 * The H/W handles mapping of 4KB/2MB pages. Hence if the host page size
+	 * is bigger, we break it into sub-pages and unmap them separately.
+ */
+ if ((page_size % PAGE_SIZE_2MB) == 0) {
+ real_page_size = PAGE_SIZE_2MB;
+ } else if ((page_size % PAGE_SIZE_4KB) == 0) {
+ real_page_size = PAGE_SIZE_4KB;
+ } else {
+ dev_err(hdev->dev,
+ "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
+ page_size);
+
+ return -EFAULT;
+ }
+
+ npages = page_size / real_page_size;
+ real_virt_addr = virt_addr;
+
+ for (i = 0 ; i < npages ; i++) {
+ rc = _hl_mmu_unmap(ctx, real_virt_addr);
+ if (rc)
+ return rc;
+
+ real_virt_addr += real_page_size;
+ }
+
+ return 0;
+}
+
+static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+ u32 page_size)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 hop0_addr = 0, hop0_pte_addr = 0,
+ hop1_addr = 0, hop1_pte_addr = 0,
+ hop2_addr = 0, hop2_pte_addr = 0,
+ hop3_addr = 0, hop3_pte_addr = 0,
+ hop4_addr = 0, hop4_pte_addr = 0,
+ curr_pte = 0;
+ bool hop1_new = false, hop2_new = false, hop3_new = false,
+ hop4_new = false, is_huge, is_dram_addr,
+ is_dram_default_page_mapping;
+ int rc = -ENOMEM;
+
+ /*
+	 * This mapping function can map a 4KB/2MB page. For a 2MB page there
+	 * are only 3 hops rather than 4. Currently the DRAM allocation uses
+	 * 2MB pages only, but user memory could have been allocated with
+	 * either of the two page sizes. Since this is common code for all
+	 * three cases, we need this huge page check.
+ */
+ is_huge = page_size == PAGE_SIZE_2MB;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ is_dram_default_page_mapping =
+ hdev->dram_default_page_mapping && is_dram_addr;
+
+ hop0_addr = get_hop0_addr(ctx);
+
+ hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+
+ hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto err;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+
+ hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto err;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+
+ hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto err;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+ if (!is_huge) {
+ hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto err;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+ }
+
+ if (is_dram_default_page_mapping) {
+ u64 zero_pte = (prop->mmu_dram_default_page_addr &
+ PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ PAGE_PRESENT_MASK;
+
+ if (curr_pte != zero_pte) {
+ dev_err(hdev->dev,
+ "DRAM: mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (hop1_new || hop2_new || hop3_new || hop4_new) {
+ dev_err(hdev->dev,
+ "DRAM mapping should not allocate more hops\n");
+ rc = -EFAULT;
+ goto err;
+ }
+ } else if (curr_pte & PAGE_PRESENT_MASK) {
+ dev_err(hdev->dev,
+ "mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+
+ dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop0_pte_addr),
+ hop0_pte_addr);
+ dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop1_pte_addr),
+ hop1_pte_addr);
+ dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop2_pte_addr),
+ hop2_pte_addr);
+ dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop3_pte_addr),
+ hop3_pte_addr);
+
+ if (!is_huge)
+ dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev,
+ hop4_pte_addr),
+ hop4_pte_addr);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
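+	/*
+	 * Write the leaf PTE first, then hook any newly allocated hops into
+	 * their parent hops and take a reference on each parent.
+	 */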
+ curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
+ | PAGE_PRESENT_MASK;
+
+ hdev->asic_funcs->write_pte(hdev,
+ is_huge ? hop3_pte_addr : hop4_pte_addr,
+ curr_pte);
+
+ if (hop1_new) {
+ curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr,
+ curr_pte);
+ }
+ if (hop2_new) {
+ curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr,
+ curr_pte);
+ get_pte(ctx, hop1_addr);
+ }
+ if (hop3_new) {
+ curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr,
+ curr_pte);
+ get_pte(ctx, hop2_addr);
+ }
+
+ if (!is_huge) {
+ if (hop4_new) {
+ curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ hop3_pte_addr, curr_pte);
+ get_pte(ctx, hop3_addr);
+ }
+
+ get_pte(ctx, hop4_addr);
+ } else {
+ get_pte(ctx, hop3_addr);
+ }
+
+ /* flush all writes from all cores to reach PCI */
+ mb();
+
+ hdev->asic_funcs->read_pte(hdev,
+ is_huge ? hop3_pte_addr : hop4_pte_addr);
+
+ return 0;
+
+err:
+ if (hop4_new)
+ free_hop(ctx, hop4_addr);
+ if (hop3_new)
+ free_hop(ctx, hop3_addr);
+ if (hop2_new)
+ free_hop(ctx, hop2_addr);
+ if (hop1_new)
+ free_hop(ctx, hop1_addr);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_map - maps a virtual addr to physical addr
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to map from
+ * @phys_addr: phys addr to map to
+ * @page_size: physical page size
+ *
+ * This function does the following:
+ * - Check that the virt addr is not mapped
+ * - Allocate pgts as necessary in order to map the virt addr to the phys
+ * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
+ *
+ * Because this function changes the page tables in the device and because it
+ * changes the MMU hash, it must be protected by a lock.
+ * However, because it maps only a single page, the lock should be implemented
+ * at a higher level in order to protect the entire mapping of the memory area
+ */
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 real_virt_addr;
+ u32 real_page_size, npages;
+ int i, rc, mapped_cnt = 0;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ /*
+	 * The H/W handles mapping of 4KB/2MB pages. Hence if the host page size
+	 * is bigger, we break it into sub-pages and map them separately.
+ */
+ if ((page_size % PAGE_SIZE_2MB) == 0) {
+ real_page_size = PAGE_SIZE_2MB;
+ } else if ((page_size % PAGE_SIZE_4KB) == 0) {
+ real_page_size = PAGE_SIZE_4KB;
+ } else {
+ dev_err(hdev->dev,
+ "page size of %u is not 4KB nor 2MB aligned, can't map\n",
+ page_size);
+
+ return -EFAULT;
+ }
+
+ npages = page_size / real_page_size;
+ real_virt_addr = virt_addr;
+
+ for (i = 0 ; i < npages ; i++) {
+ rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+ real_page_size);
+ if (rc)
+ goto err;
+
+ real_virt_addr += real_page_size;
+ mapped_cnt++;
+ }
+
+ return 0;
+
+err:
+ real_virt_addr = virt_addr;
+ for (i = 0 ; i < mapped_cnt ; i++) {
+ if (_hl_mmu_unmap(ctx, real_virt_addr))
+ dev_warn_ratelimited(hdev->dev,
+ "failed to unmap va: 0x%llx\n", real_virt_addr);
+
+ real_virt_addr += real_page_size;
+ }
+
+ return rc;
+}
+
+/*
+ * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+void hl_mmu_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+void hl_mmu_swap_in(struct hl_ctx *ctx)
+{
+
+}
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
new file mode 100644
index 000000000000..c900ab15cceb
--- /dev/null
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+
+#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
+#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
+
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ if (curr)
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ else
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.pll_index = __cpu_to_le32(pll_index);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_CLK_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get frequency of PLL %d, error %d\n",
+ pll_index, rc);
+ result = rc;
+ }
+
+ return result;
+}
+
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.pll_index = __cpu_to_le32(pll_index);
+ pkt.value = __cpu_to_le64(freq);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_CLK_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set frequency to PLL %d, error %d\n",
+ pll_index, rc);
+}
+
+u64 hl_get_max_power(struct hl_device *hdev)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_PWR_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
+ result = rc;
+ }
+
+ return result;
+}
+
+void hl_set_max_power(struct hl_device *hdev, u64 value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_PWR_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
+}
+
+static ssize_t pm_mng_profile_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n",
+ (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
+ (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
+ "unknown");
+}
+
+static ssize_t pm_mng_profile_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ mutex_lock(&hdev->fd_open_cnt_lock);
+
+ if (atomic_read(&hdev->fd_open_cnt) > 0) {
+ dev_err(hdev->dev,
+ "Can't change PM profile while user process is opened on the device\n");
+ count = -EPERM;
+ goto unlock_mutex;
+ }
+
+ if (strncmp("auto", buf, strlen("auto")) == 0) {
+ /* Make sure we are in LOW PLL when changing modes */
+ if (hdev->pm_mng_profile == PM_MANUAL) {
+ atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+ hl_device_set_frequency(hdev, PLL_LOW);
+ hdev->pm_mng_profile = PM_AUTO;
+ }
+ } else if (strncmp("manual", buf, strlen("manual")) == 0) {
+ /* Make sure we are in LOW PLL when changing modes */
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ flush_delayed_work(&hdev->work_freq);
+ hdev->pm_mng_profile = PM_MANUAL;
+ }
+ } else {
+ dev_err(hdev->dev, "value should be auto or manual\n");
+ count = -EINVAL;
+ goto unlock_mutex;
+ }
+
+unlock_mutex:
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+out:
+ return count;
+}
+
+static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%u\n", hdev->high_pll);
+}
+
+static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hdev->high_pll = value;
+
+out:
+ return count;
+}
+
+static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.uboot_ver);
+}
+
+static ssize_t armcp_kernel_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s", hdev->asic_prop.armcp_info.kernel_version);
+}
+
+static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.armcp_version);
+}
+
+static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%08x\n",
+ hdev->asic_prop.armcp_info.cpld_version);
+}
+
+static ssize_t infineon_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%04x\n",
+ hdev->asic_prop.armcp_info.infineon_version);
+}
+
+static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.fuse_version);
+}
+
+static ssize_t thermal_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s", hdev->asic_prop.armcp_info.thermal_version);
+}
+
+static ssize_t preboot_btl_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.preboot_ver);
+}
+
+static ssize_t soft_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ unsigned long value;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hl_device_reset(hdev, false, false);
+
+out:
+ return count;
+}
+
+static ssize_t hard_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ unsigned long value;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hl_device_reset(hdev, true, false);
+
+out:
+ return count;
+}
+
+static ssize_t device_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ char *str;
+
+ switch (hdev->asic_type) {
+ case ASIC_GOYA:
+ str = "GOYA";
+ break;
+ default:
+ dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
+ hdev->asic_type);
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static ssize_t pci_addr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ /* Use dummy, fixed address for simulator */
+ if (!hdev->pdev)
+ return sprintf(buf, "0000:%02d:00.0\n", hdev->id);
+
+ return sprintf(buf, "%04x:%02x:%02x.%x\n",
+ pci_domain_nr(hdev->pdev->bus),
+ hdev->pdev->bus->number,
+ PCI_SLOT(hdev->pdev->devfn),
+ PCI_FUNC(hdev->pdev->devfn));
+}
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ char *str;
+
+ if (atomic_read(&hdev->in_reset))
+ str = "In reset";
+ else if (hdev->disabled)
+ str = "Malfunction";
+ else
+ str = "Operational";
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static ssize_t write_open_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->user_ctx ? 1 : 0);
+}
+
+static ssize_t soft_reset_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->soft_reset_cnt);
+}
+
+static ssize_t hard_reset_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->hard_reset_cnt);
+}
+
+static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long val;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ val = hl_get_max_power(hdev);
+
+ return sprintf(buf, "%ld\n", val);
+}
+
+static ssize_t max_power_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ unsigned long value;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hdev->max_power = value;
+ hl_set_max_power(hdev, value);
+
+out:
+ return count;
+}
+
+static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset,
+ size_t max_size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ char *data;
+ int rc;
+
+ if (!max_size)
+ return -EINVAL;
+
+ data = kzalloc(max_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rc = hdev->asic_funcs->get_eeprom_data(hdev, data, max_size);
+ if (rc)
+ goto out;
+
+ memcpy(buf, data, max_size);
+
+out:
+ kfree(data);
+
+ if (rc)
+ return rc;
+
+ return max_size;
+}
+
+static DEVICE_ATTR_RO(armcp_kernel_ver);
+static DEVICE_ATTR_RO(armcp_ver);
+static DEVICE_ATTR_RO(cpld_ver);
+static DEVICE_ATTR_RO(device_type);
+static DEVICE_ATTR_RO(fuse_ver);
+static DEVICE_ATTR_WO(hard_reset);
+static DEVICE_ATTR_RO(hard_reset_cnt);
+static DEVICE_ATTR_RW(high_pll);
+static DEVICE_ATTR_RO(infineon_ver);
+static DEVICE_ATTR_RW(max_power);
+static DEVICE_ATTR_RO(pci_addr);
+static DEVICE_ATTR_RW(pm_mng_profile);
+static DEVICE_ATTR_RO(preboot_btl_ver);
+static DEVICE_ATTR_WO(soft_reset);
+static DEVICE_ATTR_RO(soft_reset_cnt);
+static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RO(thermal_ver);
+static DEVICE_ATTR_RO(uboot_ver);
+static DEVICE_ATTR_RO(write_open_cnt);
+
+static struct bin_attribute bin_attr_eeprom = {
+ .attr = {.name = "eeprom", .mode = (0444)},
+ .size = PAGE_SIZE,
+ .read = eeprom_read_handler
+};
+
+static struct attribute *hl_dev_attrs[] = {
+ &dev_attr_armcp_kernel_ver.attr,
+ &dev_attr_armcp_ver.attr,
+ &dev_attr_cpld_ver.attr,
+ &dev_attr_device_type.attr,
+ &dev_attr_fuse_ver.attr,
+ &dev_attr_hard_reset.attr,
+ &dev_attr_hard_reset_cnt.attr,
+ &dev_attr_high_pll.attr,
+ &dev_attr_infineon_ver.attr,
+ &dev_attr_max_power.attr,
+ &dev_attr_pci_addr.attr,
+ &dev_attr_pm_mng_profile.attr,
+ &dev_attr_preboot_btl_ver.attr,
+ &dev_attr_soft_reset.attr,
+ &dev_attr_soft_reset_cnt.attr,
+ &dev_attr_status.attr,
+ &dev_attr_thermal_ver.attr,
+ &dev_attr_uboot_ver.attr,
+ &dev_attr_write_open_cnt.attr,
+ NULL,
+};
+
+static struct bin_attribute *hl_dev_bin_attrs[] = {
+ &bin_attr_eeprom,
+ NULL
+};
+
+static struct attribute_group hl_dev_attr_group = {
+ .attrs = hl_dev_attrs,
+ .bin_attrs = hl_dev_bin_attrs,
+};
+
+static struct attribute_group hl_dev_clks_attr_group;
+
+static const struct attribute_group *hl_dev_attr_groups[] = {
+ &hl_dev_attr_group,
+ &hl_dev_clks_attr_group,
+ NULL,
+};
+
+int hl_sysfs_init(struct hl_device *hdev)
+{
+ int rc;
+
+ hdev->pm_mng_profile = PM_AUTO;
+ hdev->max_power = hdev->asic_prop.max_power_default;
+
+ hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
+
+ rc = device_add_groups(hdev->dev, hl_dev_attr_groups);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add groups to device, error %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void hl_sysfs_fini(struct hl_device *hdev)
+{
+ device_remove_groups(hdev->dev, hl_dev_attr_groups);
+}
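
The attributes registered above are ordinary sysfs files, so they can be exercised from user space with plain reads and writes. A minimal, hypothetical sketch follows; the /sys/class/habanalabs/hl0/ path is an assumption about how the device enumerates and should be adjusted to the actual class/device names.

    /* Hypothetical user-space sketch: query max_power and switch the PM
     * profile to manual. Paths assume a device named hl0 under the
     * habanalabs sysfs class.
     */
    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/class/habanalabs/hl0/max_power", "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("max_power: %s", buf);
            if (f)
                    fclose(f);

            f = fopen("/sys/class/habanalabs/hl0/pm_mng_profile", "w");
            if (!f)
                    return 1;
            fputs("manual\n", f);  /* store handler accepts "auto" or "manual" */
            fclose(f);
            return 0;
    }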
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index e9c9ef52c76a..927309b86bab 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -29,6 +29,13 @@ static struct class *ilo_class;
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
+static const struct pci_device_id ilo_blacklist[] = {
+ /* auxiliary iLO */
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
+ /* CL */
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
+ {}
+};
static inline int get_entry_id(int entry)
{
@@ -763,9 +770,10 @@ static int ilo_probe(struct pci_dev *pdev,
int devnum, minor, start, error = 0;
struct ilo_hwinfo *ilo_hw;
- /* Ignore subsystem_device = 0x1979 (set by BIOS) */
- if (pdev->subsystem_device == 0x1979)
- return 0;
+ if (pci_match_id(ilo_blacklist, pdev)) {
+ dev_dbg(&pdev->dev, "Not supported on this device\n");
+ return -ENODEV;
+ }
if (max_ccb > MAX_CCB)
max_ccb = MAX_CCB;
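
For context, pci_match_id() walks a null-terminated pci_device_id table and returns the matching entry (or NULL), which is what lets the probe above decline the auxiliary and CL variants. A hedged, standalone sketch of the same deny-list pattern; my_denylist, my_probe and the IDs are placeholders, not part of hpilo.

    /* Illustrative only: decline probing for devices on a local deny list. */
    #include <linux/pci.h>

    static const struct pci_device_id my_denylist[] = {
            { PCI_DEVICE_SUB(0x1234, 0x5678, 0x1234, 0x0001) },
            { }     /* terminating entry required by pci_match_id() */
    };

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            if (pci_match_id(my_denylist, pdev))
                    return -ENODEV; /* not supported on this device */
            /* ... normal probe path ... */
            return 0;
    }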
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 81a0541ef3ac..294fb2f66bfe 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -146,6 +146,8 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
*/
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
+ if (temp < 0)
+ temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}
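
i2c_smbus_read_word_data() returns the 16-bit value on success and a negative errno on failure, so the result has to be checked before it is shifted into the register cache. A minimal, hypothetical sketch of that pattern; the helper name is a placeholder.

    #include <linux/i2c.h>

    /* Illustrative helper: read one mirrored register, treating a failed
     * SMBus transfer as zero rather than shifting a negative errno.
     */
    static u8 read_reg_high_byte(struct i2c_client *client, u8 reg)
    {
            s32 val = i2c_smbus_read_word_data(client, reg);

            if (val < 0)            /* negative errno on bus error */
                    val = 0;
            return val >> 8;        /* data of interest is in the high byte */
    }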
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 2837dc77478e..b51cf182b031 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -37,16 +37,9 @@
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
#include <linux/slab.h>
-#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
-#ifdef CONFIG_IDE
-#include <linux/ide.h>
-#endif
-
#define DEFAULT_COUNT 10
static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
@@ -102,9 +95,7 @@ static struct crashpoint crashpoints[] = {
CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
CRASHPOINT("TIMERADD", "hrtimer_start"),
CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
-# ifdef CONFIG_IDE
CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
-# endif
#endif
};
@@ -152,7 +143,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(EXEC_VMALLOC),
CRASHTYPE(EXEC_RODATA),
CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
@@ -347,9 +340,9 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
crashtypes[i].name);
}
buf[n] = '\0';
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 3c6fd327e166..b69ee004a3f7 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
void lkdtm_EXEC_VMALLOC(void);
void lkdtm_EXEC_RODATA(void);
void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_EXEC_NULL(void);
void lkdtm_ACCESS_USERSPACE(void);
+void lkdtm_ACCESS_NULL(void);
/* lkdtm_refcount.c */
void lkdtm_REFCOUNT_INC_OVERFLOW(void);
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 53b85c9d16b8..62f76d506f04 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
{
void (*func)(void) = dst;
- pr_info("attempting ok execution at %p\n", do_nothing);
+ pr_info("attempting ok execution at %px\n", do_nothing);
do_nothing();
if (write == CODE_WRITE) {
@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
- pr_info("attempting bad execution at %p\n", func);
+ pr_info("attempting bad execution at %px\n", func);
func();
}
@@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
- pr_info("attempting ok execution at %p\n", do_nothing);
+ pr_info("attempting ok execution at %px\n", do_nothing);
do_nothing();
copied = access_process_vm(current, (unsigned long)dst, do_nothing,
EXEC_SIZE, FOLL_WRITE);
if (copied < EXEC_SIZE)
return;
- pr_info("attempting bad execution at %p\n", func);
+ pr_info("attempting bad execution at %px\n", func);
func();
}
@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
/* Explicitly cast away "const" for the test. */
unsigned long *ptr = (unsigned long *)&rodata;
- pr_info("attempting bad rodata write at %p\n", ptr);
+ pr_info("attempting bad rodata write at %px\n", ptr);
*ptr ^= 0xabcd1234;
}
@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
return;
}
- pr_info("attempting bad ro_after_init write at %p\n", ptr);
+ pr_info("attempting bad ro_after_init write at %px\n", ptr);
*ptr ^= 0xabcd1234;
}
@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
- pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ pr_info("attempting bad %zu byte write at %px\n", size, ptr);
memcpy(ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_EXEC_NULL(void)
+{
+ execute_location(NULL, CODE_AS_IS);
+}
+
void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
ptr = (unsigned long *)user_addr;
- pr_info("attempting bad read at %p\n", ptr);
+ pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
- pr_info("attempting bad write at %p\n", ptr);
+ pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_ACCESS_NULL(void)
+{
+ unsigned long tmp;
+ unsigned long *ptr = (unsigned long *)NULL;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+}
+
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
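
The new EXEC_NULL and ACCESS_NULL tests above are reached through lkdtm's debugfs interface like every other crash type. A hedged user-space sketch; the provoke-crash/DIRECT path assumes lkdtm's usual debugfs location, and triggering it will intentionally oops the kernel.

    /* Hypothetical trigger: write a crash type name to lkdtm's DIRECT node.
     * This is destructive by design; only run it on a test machine.
     */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

            if (fd < 0)
                    return 1;
            write(fd, "ACCESS_NULL", strlen("ACCESS_NULL"));
            close(fd);
            return 0;
    }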
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index c49e1d2269af..74e2c667dce0 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -43,3 +43,13 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
+
+config INTEL_MEI_HDCP
+ tristate "Intel HDCP2.2 services of ME Interface"
+ select INTEL_MEI_ME
+ depends on DRM_I915
+ help
+ MEI support for HDCP2.2 services on Intel platforms.
+
+ Enables the ME FW services required for HDCP2.2 support through
+ Intel's i915 display driver.
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index d9215fc4e499..8c2d9565a4cb 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -24,3 +24,5 @@ mei-txe-objs += hw-txe.o
mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
+
+obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 80215c312f0e..5fcac02233af 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -40,6 +40,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
+#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \
+ 0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+
#define MEI_UUID_ANY NULL_UUID_LE
/**
@@ -71,6 +74,18 @@ static void blacklist(struct mei_cl_device *cldev)
cldev->do_match = 0;
}
+/**
+ * whitelist - forcefully whitelist client
+ *
+ * @cldev: me clients device
+ */
+static void whitelist(struct mei_cl_device *cldev)
+{
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+
+ cldev->do_match = 1;
+}
+
#define OSTYPE_LINUX 2
struct mei_os_ver {
__le16 build;
@@ -472,6 +487,7 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
+ MEI_FIXUP(MEI_UUID_HDCP, whitelist),
};
/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index fc3872fe7b25..65bec998eb6e 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -28,7 +28,6 @@
#include "client.h"
#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
-#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
/**
* __mei_cl_send - internal client send (write)
@@ -541,17 +540,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
goto out;
}
- if (!mei_cl_bus_module_get(cldev)) {
- dev_err(&cldev->dev, "get hw module failed");
- ret = -ENODEV;
- goto out;
- }
-
ret = mei_cl_connect(cl, cldev->me_cl, NULL);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&cldev->dev, "cannot connect\n");
- mei_cl_bus_module_put(cldev);
- }
out:
mutex_unlock(&bus->device_lock);
@@ -614,7 +605,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
if (err < 0)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
- mei_cl_bus_module_put(cldev);
out:
/* Flush queues and remove any pending read */
mei_cl_flush_queues(cl, NULL);
@@ -725,9 +715,16 @@ static int mei_cl_device_probe(struct device *dev)
if (!id)
return -ENODEV;
+ if (!mei_cl_bus_module_get(cldev)) {
+ dev_err(&cldev->dev, "get hw module failed");
+ return -ENODEV;
+ }
+
ret = cldrv->probe(cldev, id);
- if (ret)
+ if (ret) {
+ mei_cl_bus_module_put(cldev);
return ret;
+ }
__module_get(THIS_MODULE);
return 0;
@@ -755,6 +752,7 @@ static int mei_cl_device_remove(struct device *dev)
mei_cldev_unregister_callbacks(cldev);
+ mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
dev->driver = NULL;
return ret;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 8f7616557c97..e6207f614816 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1029,29 +1029,36 @@ static void mei_hbm_config_features(struct mei_device *dev)
dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
dev->hbm_f_pg_supported = 1;
+ dev->hbm_f_dc_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
dev->hbm_f_dc_supported = 1;
+ dev->hbm_f_ie_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
dev->hbm_f_ie_supported = 1;
/* disconnect on connect timeout instead of link reset */
+ dev->hbm_f_dot_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
dev->hbm_f_dot_supported = 1;
/* Notification Event Support */
+ dev->hbm_f_ev_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
dev->hbm_f_ev_supported = 1;
/* Fixed Address Client Support */
+ dev->hbm_f_fa_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
dev->hbm_f_fa_supported = 1;
/* OS ver message Support */
+ dev->hbm_f_os_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
dev->hbm_f_os_supported = 1;
/* DMA Ring Support */
+ dev->hbm_f_dr_supported = 0;
if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
(dev->version.major_version == HBM_MAJOR_VERSION_DR &&
dev->version.minor_version >= HBM_MINOR_VERSION_DR))
diff --git a/drivers/misc/mei/hdcp/Makefile b/drivers/misc/mei/hdcp/Makefile
new file mode 100644
index 000000000000..adbe7506282d
--- /dev/null
+++ b/drivers/misc/mei/hdcp/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2019, Intel Corporation.
+#
+# Makefile - HDCP client driver for Intel MEI Bus Driver.
+
+obj-$(CONFIG_INTEL_MEI_HDCP) += mei_hdcp.o
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
new file mode 100644
index 000000000000..90b6ae8e9dae
--- /dev/null
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * mei_hdcp.c: HDCP client driver for the mei bus
+ *
+ * Author:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+/**
+ * DOC: MEI_HDCP Client Driver
+ *
+ * This is a client driver on the mei bus that makes the HDCP2.2 services
+ * of the ME FW available to interested consumers such as i915.
+ *
+ * The module acts as a translation layer between the HDCP protocol
+ * implementer (i915) and the ME FW, translating HDCP2.2 authentication
+ * messages into ME FW command payloads and vice versa.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/component.h>
+#include <drm/drm_connector.h>
+#include <drm/i915_component.h>
+#include <drm/i915_mei_hdcp_interface.h>
+
+#include "mei_hdcp.h"
+
+static inline u8 mei_get_ddi_index(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return MEI_DDI_A;
+ case PORT_B ... PORT_F:
+ return (u8)port;
+ default:
+ return MEI_DDI_INVALID_PORT;
+ }
+}
+
+/**
+ * mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @ake_data: AKE_Init msg output.
+ *
+ * Return: 0 on Success, <0 on Failure.
+ */
+static int
+mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
+ struct wired_cmd_initiate_hdcp2_session_out
+ session_init_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !ake_data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ session_init_in.header.api_version = HDCP_API_VERSION;
+ session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
+ session_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ session_init_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
+
+ session_init_in.port.integrated_port_type = data->port_type;
+ session_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_init_in.protocol = data->protocol;
+
+ byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
+ sizeof(session_init_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&session_init_out,
+ sizeof(session_init_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_INITIATE_HDCP2_SESSION,
+ session_init_out.header.status);
+ return -EIO;
+ }
+
+ ake_data->msg_id = HDCP_2_2_AKE_INIT;
+ ake_data->tx_caps = session_init_out.tx_caps;
+ memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
+
+ return 0;
+}
+
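
All of the mei_hdcp_* helpers that follow repeat the shape of the function above: fill a HECI header, send the request with mei_cldev_send(), receive the reply with mei_cldev_recv(), and check the firmware status word. A condensed, hedged sketch of that skeleton; mei_hdcp_do_cmd and its parameters are placeholders, not an actual driver function.

    /* Illustrative skeleton of the send/receive/status-check pattern shared
     * by the helpers in this file.
     */
    static int mei_hdcp_do_cmd(struct device *dev, void *in, size_t in_sz,
                               void *out, size_t out_sz)
    {
            struct mei_cl_device *cldev = to_mei_cl_device(dev);
            struct hdcp_cmd_header *hdr = out;
            ssize_t byte;

            byte = mei_cldev_send(cldev, in, in_sz);        /* HECI request */
            if (byte < 0)
                    return byte;

            byte = mei_cldev_recv(cldev, out, out_sz);      /* HECI response */
            if (byte < 0)
                    return byte;

            if (hdr->status != ME_HDCP_STATUS_SUCCESS)      /* FW result */
                    return -EIO;

            return 0;
    }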
+/**
+ * mei_hdcp_verify_receiver_cert_prepare_km() - Verify the Receiver Certificate
+ * AKE_Send_Cert and prepare AKE_Stored_Km/AKE_No_Stored_Km
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rx_cert: AKE_Send_Cert for verification
+ * @km_stored: Pairing status flag output
+ * @ek_pub_km: AKE_Stored_Km/AKE_No_Stored_Km output msg
+ * @msg_sz: size of AKE_XXXXX_Km output msg
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
+ struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_rxcert_in.header.api_version = HDCP_API_VERSION;
+ verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
+ verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_rxcert_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
+
+ verify_rxcert_in.port.integrated_port_type = data->port_type;
+ verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ verify_rxcert_in.cert_rx = rx_cert->cert_rx;
+ memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
+ memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_rxcert_in,
+ sizeof(verify_rxcert_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed: %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_rxcert_out,
+ sizeof(verify_rxcert_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed: %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_VERIFY_RECEIVER_CERT,
+ verify_rxcert_out.header.status);
+ return -EIO;
+ }
+
+ *km_stored = !!verify_rxcert_out.km_stored;
+ if (verify_rxcert_out.km_stored) {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_stored_km);
+ } else {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
+ }
+
+ memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
+ sizeof(verify_rxcert_out.ekm_buff));
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_verify_hprime() - Verify AKE_Send_H_prime at ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rx_hprime: AKE_Send_H_prime msg for ME FW verification
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
+ struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_hprime)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ send_hprime_in.header.api_version = HDCP_API_VERSION;
+ send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
+ send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
+
+ send_hprime_in.port.integrated_port_type = data->port_type;
+ send_hprime_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
+ HDCP_2_2_H_PRIME_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&send_hprime_in,
+ sizeof(send_hprime_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&send_hprime_out,
+ sizeof(send_hprime_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_store_pairing_info() - Store pairing info received at ME FW
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @pairing_info: AKE_Send_Pairing_Info msg input to ME FW
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
+ struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !pairing_info)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ pairing_info_in.header.api_version = HDCP_API_VERSION;
+ pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
+ pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ pairing_info_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
+
+ pairing_info_in.port.integrated_port_type = data->port_type;
+ pairing_info_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
+ HDCP_2_2_E_KH_KM_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&pairing_info_in,
+ sizeof(pairing_info_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&pairing_info_out,
+ sizeof(pairing_info_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_PAIRING_INFO,
+ pairing_info_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_initiate_locality_check() - Prepare LC_Init
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @lc_init_data: LC_Init msg output
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_initiate_locality_check(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data)
+{
+ struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
+ struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !lc_init_data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ lc_init_in.header.api_version = HDCP_API_VERSION;
+ lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
+ lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
+
+ lc_init_in.port.integrated_port_type = data->port_type;
+ lc_init_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&lc_init_out, sizeof(lc_init_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n",
+ WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
+ return -EIO;
+ }
+
+ lc_init_data->msg_id = HDCP_2_2_LC_INIT;
+ memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_verify_lprime() - Verify lprime.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rx_lprime: LC_Send_L_prime msg for ME FW verification
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
+ struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_lprime)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_lprime_in.header.api_version = HDCP_API_VERSION;
+ verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
+ verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_lprime_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
+
+ verify_lprime_in.port.integrated_port_type = data->port_type;
+ verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
+ HDCP_2_2_L_PRIME_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_lprime_in,
+ sizeof(verify_lprime_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_lprime_out,
+ sizeof(verify_lprime_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VALIDATE_LOCALITY,
+ verify_lprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_get_session_key() - Prepare SKE_Send_Eks.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @ske_data: SKE_Send_Eks msg output from ME FW.
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int mei_hdcp_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
+ struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !ske_data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ get_skey_in.header.api_version = HDCP_API_VERSION;
+ get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
+ get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
+
+ get_skey_in.port.integrated_port_type = data->port_type;
+ get_skey_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&get_skey_out, sizeof(get_skey_out));
+
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_GET_SESSION_KEY, get_skey_out.header.status);
+ return -EIO;
+ }
+
+ ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
+ memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
+ HDCP_2_2_E_DKEY_KS_LEN);
+ memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_repeater_check_flow_prepare_ack() - Validate the Downstream topology
+ * and prepare rep_ack.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rep_topology: Receiver ID List to be validated
+ * @rep_send_ack: repeater ack from ME FW.
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack)
+{
+ struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
+ struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !rep_topology || !rep_send_ack || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_repeater_in.header.api_version = HDCP_API_VERSION;
+ verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
+ verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_repeater_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
+
+ verify_repeater_in.port.integrated_port_type = data->port_type;
+ verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
+ HDCP_2_2_RXINFO_LEN);
+ memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
+ HDCP_2_2_SEQ_NUM_LEN);
+ memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_repeater_in,
+ sizeof(verify_repeater_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_repeater_out,
+ sizeof(verify_repeater_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VERIFY_REPEATER,
+ verify_repeater_out.header.status);
+ return -EIO;
+ }
+
+ memcpy(rep_send_ack->v, verify_repeater_out.v,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_verify_mprime() - Verify mprime.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @stream_ready: RepeaterAuth_Stream_Ready msg for ME FW verification.
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int mei_hdcp_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct wired_cmd_repeater_auth_stream_req_in
+ verify_mprime_in = { { 0 } };
+ struct wired_cmd_repeater_auth_stream_req_out
+ verify_mprime_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !stream_ready || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_mprime_in.header.api_version = HDCP_API_VERSION;
+ verify_mprime_in.header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
+ verify_mprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_mprime_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
+
+ verify_mprime_in.port.integrated_port_type = data->port_type;
+ verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
+ HDCP_2_2_MPRIME_LEN);
+ drm_hdcp2_u32_to_seq_num(verify_mprime_in.seq_num_m, data->seq_num_m);
+ memcpy(verify_mprime_in.streams, data->streams,
+ (data->k * sizeof(struct hdcp2_streamid_type)));
+
+ verify_mprime_in.k = cpu_to_be16(data->k);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_mprime_in,
+ sizeof(verify_mprime_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_mprime_out,
+ sizeof(verify_mprime_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ verify_mprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_enable_authentication() - Mark a port as authenticated
+ * through ME FW
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int mei_hdcp_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
+{
+ struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
+ struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ enable_auth_in.header.api_version = HDCP_API_VERSION;
+ enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
+ enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
+
+ enable_auth_in.port.integrated_port_type = data->port_type;
+ enable_auth_in.port.physical_port = mei_get_ddi_index(data->port);
+ enable_auth_in.stream_type = data->streams[0].stream_type;
+
+ byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
+ sizeof(enable_auth_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&enable_auth_out,
+ sizeof(enable_auth_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_ENABLE_AUTH, enable_auth_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_close_session() - Close the Wired HDCP Tx session of ME FW per port.
+ * This also disables the authenticated state of the port.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
+{
+ struct wired_cmd_close_session_in session_close_in = { { 0 } };
+ struct wired_cmd_close_session_out session_close_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ session_close_in.header.api_version = HDCP_API_VERSION;
+ session_close_in.header.command_id = WIRED_CLOSE_SESSION;
+ session_close_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ session_close_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
+
+ session_close_in.port.integrated_port_type = data->port_type;
+ session_close_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
+ sizeof(session_close_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&session_close_out,
+ sizeof(session_close_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "Session Close Failed. status: 0x%X\n",
+ session_close_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct i915_hdcp_component_ops mei_hdcp_ops = {
+ .owner = THIS_MODULE,
+ .initiate_hdcp2_session = mei_hdcp_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ mei_hdcp_verify_receiver_cert_prepare_km,
+ .verify_hprime = mei_hdcp_verify_hprime,
+ .store_pairing_info = mei_hdcp_store_pairing_info,
+ .initiate_locality_check = mei_hdcp_initiate_locality_check,
+ .verify_lprime = mei_hdcp_verify_lprime,
+ .get_session_key = mei_hdcp_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ mei_hdcp_repeater_check_flow_prepare_ack,
+ .verify_mprime = mei_hdcp_verify_mprime,
+ .enable_hdcp_authentication = mei_hdcp_enable_authentication,
+ .close_hdcp_session = mei_hdcp_close_session,
+};
+
+static int mei_component_master_bind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_hdcp_comp_master *comp_master =
+ mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+ comp_master->ops = &mei_hdcp_ops;
+ comp_master->mei_dev = dev;
+ ret = component_bind_all(dev, comp_master);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mei_component_master_unbind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_hdcp_comp_master *comp_master =
+ mei_cldev_get_drvdata(cldev);
+
+ dev_dbg(dev, "%s\n", __func__);
+ component_unbind_all(dev, comp_master);
+}
+
+static const struct component_master_ops mei_component_master_ops = {
+ .bind = mei_component_master_bind,
+ .unbind = mei_component_master_unbind,
+};
+
+static int mei_hdcp_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+ return !strcmp(dev->driver->name, "i915") &&
+ subcomponent == I915_COMPONENT_HDCP;
+}
+
+static int mei_hdcp_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct i915_hdcp_comp_master *comp_master;
+ struct component_match *master_match;
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
+ goto enable_err_exit;
+ }
+
+ comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
+ if (!comp_master) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ master_match = NULL;
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_hdcp_component_match, comp_master);
+ if (IS_ERR_OR_NULL(master_match)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ mei_cldev_set_drvdata(cldev, comp_master);
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_component_master_ops,
+ master_match);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ mei_cldev_set_drvdata(cldev, NULL);
+ kfree(comp_master);
+ mei_cldev_disable(cldev);
+enable_err_exit:
+ return ret;
+}
+
+static int mei_hdcp_remove(struct mei_cl_device *cldev)
+{
+ struct i915_hdcp_comp_master *comp_master =
+ mei_cldev_get_drvdata(cldev);
+
+ component_master_del(&cldev->dev, &mei_component_master_ops);
+ kfree(comp_master);
+ mei_cldev_set_drvdata(cldev, NULL);
+
+ return mei_cldev_disable(cldev);
+}
+
+#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
+ 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+
+static struct mei_cl_device_id mei_hdcp_tbl[] = {
+ { .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_hdcp_tbl);
+
+static struct mei_cl_driver mei_hdcp_driver = {
+ .id_table = mei_hdcp_tbl,
+ .name = KBUILD_MODNAME,
+ .probe = mei_hdcp_probe,
+ .remove = mei_hdcp_remove,
+};
+
+module_mei_cl_driver(mei_hdcp_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI HDCP");
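
On the consumer side, the display driver is expected to register a typed component so that mei_hdcp_component_match() above can pair the two devices and hand over the ops table during bind. A hedged sketch of that side; the my_hdcp_* names are placeholders and not real i915 symbols.

    /* Hypothetical consumer: register as the HDCP subcomponent and pick up
     * the mei ops when the component master binds.
     */
    #include <linux/component.h>
    #include <drm/i915_component.h>
    #include <drm/i915_mei_hdcp_interface.h>

    static int my_hdcp_bind(struct device *dev, struct device *mei_dev,
                            void *data)
    {
            struct i915_hdcp_comp_master *master = data;

            if (!master->ops || !master->mei_dev)
                    return -ENXIO;
            /* stash master for later calls through master->ops */
            return 0;
    }

    static void my_hdcp_unbind(struct device *dev, struct device *mei_dev,
                               void *data)
    {
            /* drop the stashed master reference here */
    }

    static const struct component_ops my_hdcp_component_ops = {
            .bind   = my_hdcp_bind,
            .unbind = my_hdcp_unbind,
    };

    static int my_hdcp_register(struct device *dev)
    {
            return component_add_typed(dev, &my_hdcp_component_ops,
                                       I915_COMPONENT_HDCP);
    }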
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
new file mode 100644
index 000000000000..5f74b908e486
--- /dev/null
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -0,0 +1,377 @@
+/* SPDX-License-Identifier: (GPL-2.0+) */
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#ifndef __MEI_HDCP_H__
+#define __MEI_HDCP_H__
+
+#include <drm/drm_hdcp.h>
+
+/* me_hdcp_status: Enumeration of all HDCP Status Codes */
+enum me_hdcp_status {
+ ME_HDCP_STATUS_SUCCESS = 0x0000,
+
+ /* WiDi Generic Status Codes */
+ ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
+ ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
+ ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
+ ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
+ ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
+ ME_HDCP_STATUS_INVALID_PARAMS = 0x1005,
+ ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
+
+ /* WiDi Status Codes */
+ ME_HDCP_INVALID_SESSION_STATE = 0x6000,
+ ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
+ ME_HDCP_SRM_INVALID_LENGTH = 0x6002,
+ ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
+ ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
+ ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
+ ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
+ ME_HDCP_RX_REVOKED = 0x6007,
+ ME_HDCP_H_VERIFICATION_FAILED = 0x6008,
+ ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
+ ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
+ ME_HDCP_V_VERIFICATION_FAILED = 0x600B,
+ ME_HDCP_L_VERIFICATION_FAILED = 0x600C,
+ ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
+ ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
+ ME_HDCP_NONCE_GENERATION_FAILED = 0x600F,
+ ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
+ ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
+ ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
+ ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
+ ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
+ ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
+
+ /* New status for HDCP 2.1 */
+ ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
+
+ /* New status code for HDCP 2.2 Rx */
+ ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
+ ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
+ ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
+ ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
+ ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
+ ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
+ ME_HDCP_FAIL_NOT_EXPECTED = 0x6023,
+ ME_HDCP_FAIL_HDCP_OFF = 0x6024,
+ ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
+ ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
+ ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
+ ME_HDCP_DMA_READ_ERROR = 0x6028,
+ ME_HDCP_DMA_WRITE_ERROR = 0x6029,
+ ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
+ ME_HDCP_H264_PARSING_ERROR = 0x6031,
+ ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
+ ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
+ ME_HDCP_TX_ACTIVE_ERROR = 0x6034,
+ ME_HDCP_MODE_CHANGE_ERROR = 0x6035,
+ ME_HDCP_STREAM_TYPE_ERROR = 0x6036,
+ ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
+
+ ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
+ ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
+ ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
+ ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
+ ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
+ ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
+
+ /* hdcp capable bit is not set in rx_caps (error is unique to DP) */
+ ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
+
+ ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
+};
+
+#define HDCP_API_VERSION 0x00010000
+
+#define HDCP_M_LEN 16
+#define HDCP_KH_LEN 16
+
+/* Payload buffer size (excluding header) for CMDs and corresponding responses */
+/* Wired_Tx_AKE */
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
+
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
+
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
+
+/* Wired_Tx_LC */
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
+
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
+
+/* Wired_Tx_SKE */
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
+
+/* Wired_Tx_Enable_Auth */
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
+
+/* Wired_Tx_Repeater */
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
+ 32 + 2 + 2)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
+
+/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
+enum hdcp_command_id {
+ _WIDI_COMMAND_BASE = 0x00030000,
+ WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
+ HDCP_GET_SRM_STATUS,
+ HDCP_SEND_SRM_FRAGMENT,
+
+ /* The wired HDCP Tx commands */
+ _WIRED_COMMAND_BASE = 0x00031000,
+ WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
+ WIRED_VERIFY_RECEIVER_CERT,
+ WIRED_AKE_SEND_HPRIME,
+ WIRED_AKE_SEND_PAIRING_INFO,
+ WIRED_INIT_LOCALITY_CHECK,
+ WIRED_VALIDATE_LOCALITY,
+ WIRED_GET_SESSION_KEY,
+ WIRED_ENABLE_AUTH,
+ WIRED_VERIFY_REPEATER,
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ WIRED_CLOSE_SESSION,
+
+ _WIRED_COMMANDS_COUNT,
+};
+
+union encrypted_buff {
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+ struct {
+ u8 e_kh_km[HDCP_KH_LEN];
+ u8 m[HDCP_M_LEN];
+ } __packed;
+};
+
+/* HDCP HECI message header. All header values are little endian. */
+struct hdcp_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ enum me_hdcp_status status;
+ /* Length of the HECI message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+/* Empty command request or response. No data follows the header. */
+struct hdcp_cmd_no_data {
+ struct hdcp_cmd_header header;
+} __packed;
+
+/* Uniquely identifies the hdcp port being addressed for a given command. */
+struct hdcp_port_id {
+ u8 integrated_port_type;
+ u8 physical_port;
+ u16 reserved;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in
+ * support of the AKE protocol
+ */
+/* HECI struct for integrated wired HDCP Tx session initiation. */
+struct wired_cmd_initiate_hdcp2_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 protocol; /* for HDMI vs DP */
+} __packed;
+
+struct wired_cmd_initiate_hdcp2_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
+
+/* HECI struct for ending an integrated wired HDCP Tx session. */
+struct wired_cmd_close_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_close_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
+struct wired_cmd_verify_receiver_cert_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct wired_cmd_verify_receiver_cert_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 km_stored;
+ u8 reserved[3];
+ union encrypted_buff ekm_buff;
+} __packed;
+
+/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */
+struct wired_cmd_ake_send_hprime_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_hprime_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * HECI struct for sending in AKE pairing data generated by the Rx in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_ake_send_pairing_info_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_pairing_info_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol */
+/*
+ * HECI struct for initiating locality check with an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_init_locality_check_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_init_locality_check_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+/*
+ * HECI struct for validating an Rx's LPrime value in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_validate_locality_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_validate_locality_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of the
+ * SKE protocol
+ */
+/* HECI struct for creating session key */
+struct wired_cmd_get_session_key_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_get_session_key_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 r_iv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+/* HECI struct for the Tx enable authentication command */
+struct wired_cmd_enable_auth_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 stream_type;
+} __packed;
+
+struct wired_cmd_enable_auth_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of
+ * the repeater protocols
+ */
+/*
+ * HECI struct for verifying the downstream repeater's HDCP topology in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_verify_repeater_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct wired_cmd_verify_repeater_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 content_type_supported;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+/*
+ * HECI struct in support of stream management in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_repeater_auth_stream_req_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[1];
+} __packed;
+
+struct wired_cmd_repeater_auth_stream_req_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+enum mei_fw_ddi {
+ MEI_DDI_INVALID_PORT = 0x0,
+
+ MEI_DDI_B = 1,
+ MEI_DDI_C,
+ MEI_DDI_D,
+ MEI_DDI_E,
+ MEI_DDI_F,
+ MEI_DDI_A = 7,
+ MEI_DDI_RANGE_END = MEI_DDI_A,
+};
+#endif /* __MEI_HDCP_H__ */
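
The numbering in enum mei_fw_ddi is what lets mei_get_ddi_index() in mei_hdcp.c cast the i915 port directly: DDI B..F keep the port's numeric value (1..5), while port A is remapped to the firmware's value 7. A small, purely illustrative restatement of that mapping; example_port_to_ddi is not part of the driver.

    /* Illustrative only: the port -> DDI translation assumed by
     * mei_get_ddi_index(); 0 stands for port A, 1..5 for ports B..F.
     */
    static u8 example_port_to_ddi(int port)
    {
            if (port == 0)
                    return MEI_DDI_A;               /* firmware value 7 */
            if (port >= 1 && port <= 5)
                    return (u8)port;                /* MEI_DDI_B..MEI_DDI_F */
            return MEI_DDI_INVALID_PORT;
    }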
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 2b7f7677f8cc..b7d2487b8409 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -311,7 +311,8 @@ struct mei_client_properties {
u8 protocol_version;
u8 max_number_of_connections;
u8 fixed_address;
- u8 single_recv_buf;
+ u8 single_recv_buf:1;
+ u8 reserved:7;
u32 max_msg_length;
} __packed;
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 227cc7443671..242dcee14689 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -38,7 +38,6 @@ comment "VOP Bus Driver"
config VOP_BUS
tristate "VOP Bus Driver"
- depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
@@ -132,7 +131,7 @@ comment "VOP Driver"
config VOP
tristate "VOP Driver"
- depends on 64BIT && PCI && X86 && VOP_BUS
+ depends on VOP_BUS
select VHOST_RING
select VIRTIO
help
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
index ff59568219ad..377a4f38cd7e 100644
--- a/drivers/misc/mic/bus/scif_bus.h
+++ b/drivers/misc/mic/bus/scif_bus.h
@@ -88,8 +88,8 @@ struct scif_driver {
* @send_intr: Send an interrupt to the remote node on a specified doorbell.
* @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell
* which is specifically targeted for a peer to peer node.
- * @ioremap: Map a buffer with the specified physical address and length.
- * @iounmap: Unmap a buffer previously mapped.
+ * @remap: Map a buffer with the specified physical address and length.
+ * @unmap: Unmap a buffer previously mapped.
*/
struct scif_hw_ops {
int (*next_db)(struct scif_hw_dev *sdev);
@@ -104,9 +104,9 @@ struct scif_hw_ops {
void (*send_intr)(struct scif_hw_dev *sdev, int db);
void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
struct mic_mw *mw);
- void __iomem * (*ioremap)(struct scif_hw_dev *sdev,
+ void __iomem * (*remap)(struct scif_hw_dev *sdev,
phys_addr_t pa, size_t len);
- void (*iounmap)(struct scif_hw_dev *sdev, void __iomem *va);
+ void (*unmap)(struct scif_hw_dev *sdev, void __iomem *va);
};
int scif_register_driver(struct scif_driver *driver);
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
index fff7a865d721..cf5f3fae573c 100644
--- a/drivers/misc/mic/bus/vop_bus.h
+++ b/drivers/misc/mic/bus/vop_bus.h
@@ -87,8 +87,8 @@ struct vop_driver {
* @get_dp: Get access to the virtio device page used by the self
* node to add/remove/configure virtio devices.
* @send_intr: Send an interrupt to the peer node on a specified doorbell.
- * @ioremap: Map a buffer with the specified DMA address and length.
- * @iounmap: Unmap a buffer previously mapped.
+ * @remap: Map a buffer with the specified DMA address and length.
+ * @unmap: Unmap a buffer previously mapped.
* @dma_filter: The DMA filter function to use for obtaining access to
* a DMA channel on the peer node.
*/
@@ -104,9 +104,9 @@ struct vop_hw_ops {
void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
void * (*get_dp)(struct vop_device *vpdev);
void (*send_intr)(struct vop_device *vpdev, int db);
- void __iomem * (*ioremap)(struct vop_device *vpdev,
+ void __iomem * (*remap)(struct vop_device *vpdev,
dma_addr_t pa, size_t len);
- void (*iounmap)(struct vop_device *vpdev, void __iomem *va);
+ void (*unmap)(struct vop_device *vpdev, void __iomem *va);
};
struct vop_device *
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index e749af48f736..dcd07ef29801 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -245,8 +245,8 @@ static struct scif_hw_ops scif_hw_ops = {
.next_db = ___mic_next_db,
.send_intr = ___mic_send_intr,
.send_p2p_intr = ___mic_send_p2p_intr,
- .ioremap = ___mic_ioremap,
- .iounmap = ___mic_iounmap,
+ .remap = ___mic_ioremap,
+ .unmap = ___mic_iounmap,
};
static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
@@ -316,8 +316,8 @@ static struct vop_hw_ops vop_hw_ops = {
.next_db = __mic_next_db,
.get_remote_dp = __mic_get_remote_dp,
.send_intr = __mic_send_intr,
- .ioremap = __mic_ioremap,
- .iounmap = __mic_iounmap,
+ .remap = __mic_ioremap,
+ .unmap = __mic_iounmap,
};
static int mic_request_dma_chans(struct mic_driver *mdrv)
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 6479435ac96b..079c36f0ce6e 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -133,8 +133,8 @@ static struct vop_hw_ops vop_hw_ops = {
.get_dp = __mic_get_dp,
.get_remote_dp = __mic_get_remote_dp,
.send_intr = __mic_send_intr,
- .ioremap = __mic_ioremap,
- .iounmap = __mic_iounmap,
+ .remap = __mic_ioremap,
+ .unmap = __mic_iounmap,
};
static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
@@ -315,8 +315,8 @@ static struct scif_hw_ops scif_hw_ops = {
.ack_interrupt = ___mic_ack_interrupt,
.next_db = ___mic_next_db,
.send_intr = ___mic_send_intr,
- .ioremap = ___mic_ioremap,
- .iounmap = ___mic_iounmap,
+ .remap = ___mic_ioremap,
+ .unmap = ___mic_iounmap,
};
static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
index 3e86360ba5a6..7b380534eba1 100644
--- a/drivers/misc/mic/scif/scif_map.h
+++ b/drivers/misc/mic/scif/scif_map.h
@@ -97,7 +97,7 @@ scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
out_virt = phys_to_virt(phys);
else
out_virt = (void __force *)
- sdev->hw_ops->ioremap(sdev, phys, size);
+ sdev->hw_ops->remap(sdev, phys, size);
return out_virt;
}
@@ -107,7 +107,7 @@ scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
if (!scifdev_self(scifdev)) {
struct scif_hw_dev *sdev = scifdev->sdev;
- sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt);
+ sdev->hw_ops->unmap(sdev, (void __force __iomem *)virt);
}
}
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 749321eb91ae..f62216628fa6 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -672,8 +672,8 @@ int scif_unregister_window(struct scif_window *window)
{
window->unreg_state = OP_IN_PROGRESS;
send_msg = true;
- /* fall through */
}
+ /* fall through */
case OP_IN_PROGRESS:
{
scif_get_window(window, 1);
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 744757f541be..e37b2c2152a2 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "vop_main.h"
@@ -118,7 +119,7 @@ _vop_total_desc_size(struct mic_device_desc __iomem *desc)
static u64 vop_get_features(struct virtio_device *vdev)
{
unsigned int i, bits;
- u32 features = 0;
+ u64 features = 0;
struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
u8 __iomem *in_features = _vop_vq_features(desc);
int feature_len = ioread8(&desc->feature_len);
@@ -126,7 +127,7 @@ static u64 vop_get_features(struct virtio_device *vdev)
bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++)
if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
- features |= BIT(i);
+ features |= BIT_ULL(i);
return features;
}
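The hunk above widens the feature accumulator from u32 to u64 and switches BIT() to BIT_ULL(). A standalone userspace sketch (illustrative only) of why that matters: a 32-bit accumulator silently drops virtio feature bits at position 32 and above, and 1UL << i is undefined for i >= 32 on 32-bit builds, while 1ULL << i is well defined up to bit 63.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
	uint64_t features = 0;
	uint32_t features32 = 0;
	int i = 40;			/* a hypothetical feature bit above 31 */

	features   |= BIT_ULL(i);
	features32 |= (uint32_t)BIT_ULL(i);	/* truncates to 0 */

	printf("u64: 0x%016llx  u32: 0x%08x\n",
	       (unsigned long long)features, features32);
	return 0;
}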
@@ -228,7 +229,7 @@ static void vop_reset_inform_host(struct virtio_device *dev)
if (ioread8(&dc->host_ack))
break;
msleep(100);
- };
+ }
dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
@@ -269,7 +270,7 @@ static void vop_del_vq(struct virtqueue *vq, int n)
free_pages((unsigned long)vdev->used_virt[n],
get_order(vdev->used_size[n]));
vring_del_virtqueue(vq);
- vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
+ vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
vdev->vr[n] = NULL;
}
@@ -337,8 +338,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
memcpy_fromio(&config, vqconfig, sizeof(config));
_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
- va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
- vr_size);
+ va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
if (!va)
return ERR_PTR(-ENOMEM);
vdev->vr[index] = va;
@@ -392,7 +392,7 @@ free_used:
free_pages((unsigned long)used,
get_order(vdev->used_size[index]));
unmap:
- vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
+ vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
return ERR_PTR(err);
}
@@ -437,7 +437,7 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
if (!ioread8(&dc->used_address_updated))
break;
msleep(100);
- };
+ }
dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
if (!retry) {
@@ -513,7 +513,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
vdev->desc = d;
vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
vdev->dnode = dnode;
- vdev->vdev.priv = (void *)(u64)dnode;
+ vdev->vdev.priv = (void *)(unsigned long)dnode;
init_completion(&vdev->reset_done);
vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
@@ -535,7 +535,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
offset, type);
goto free_irq;
}
- writeq((u64)vdev, &vdev->dc->vdev);
+ writeq((unsigned long)vdev, &vdev->dc->vdev);
dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
__func__, offset, type, vdev);
@@ -562,13 +562,18 @@ static int vop_match_desc(struct device *dev, void *data)
return vdev->desc == (void __iomem *)data;
}
+static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
+{
+ return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
+}
+
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
unsigned int offset,
struct vop_device *vpdev)
{
struct mic_device_ctrl __iomem *dc
= (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+ struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
return;
@@ -587,7 +592,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
{
struct mic_device_ctrl __iomem *dc
= (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+ struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
u8 status;
int ret = -1;
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index cbc8ebcff5cf..3632fce40590 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -80,7 +80,7 @@ static void vop_virtio_init_post(struct vop_vdev *vdev)
continue;
}
vdev->vvr[i].vrh.vring.used =
- (void __force *)vpdev->hw_ops->ioremap(
+ (void __force *)vpdev->hw_ops->remap(
vpdev,
le64_to_cpu(vqconfig[i].used_address),
used_size);
@@ -528,15 +528,15 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
int vr_idx)
{
struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
struct vop_vringh *vvr = &vdev->vvr[vr_idx];
struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
- size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
- bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_alignment;
+ bool x200;
size_t dma_offset, partlen;
int err;
- if (!VOP_USE_DMA) {
+ if (!VOP_USE_DMA || !vi->dma_ch) {
if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
err = -EFAULT;
dev_err(vop_dev(vdev), "%s %d err %d\n",
@@ -548,6 +548,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
goto err;
}
+ dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+
dma_offset = daddr - round_down(daddr, dma_alignment);
daddr -= dma_offset;
len += dma_offset;
@@ -585,9 +588,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
}
err = 0;
err:
- vpdev->hw_ops->iounmap(vpdev, dbuf);
+ vpdev->hw_ops->unmap(vpdev, dbuf);
dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
__func__, ubuf, dbuf, len, vr_idx);
return err;
}
@@ -603,21 +606,26 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
int vr_idx)
{
struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
struct vop_vringh *vvr = &vdev->vvr[vr_idx];
struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
- size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
- bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_alignment;
+ bool x200;
size_t partlen;
- bool dma = VOP_USE_DMA;
+ bool dma = VOP_USE_DMA && vi->dma_ch;
int err = 0;
- if (daddr & (dma_alignment - 1)) {
- vdev->tx_dst_unaligned += len;
- dma = false;
- } else if (ALIGN(len, dma_alignment) > dlen) {
- vdev->tx_len_unaligned += len;
- dma = false;
+ if (dma) {
+ dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+
+ if (daddr & (dma_alignment - 1)) {
+ vdev->tx_dst_unaligned += len;
+ dma = false;
+ } else if (ALIGN(len, dma_alignment) > dlen) {
+ vdev->tx_len_unaligned += len;
+ dma = false;
+ }
}
if (!dma)
@@ -668,9 +676,9 @@ memcpy:
vdev->out_bytes += len;
err = 0;
err:
- vpdev->hw_ops->iounmap(vpdev, dbuf);
+ vpdev->hw_ops->unmap(vpdev, dbuf);
dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
__func__, ubuf, dbuf, len, vr_idx);
return err;
}
@@ -704,16 +712,17 @@ static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
while (len && iov->i < iov->used) {
struct kvec *kiov = &iov->iov[iov->i];
+ unsigned long daddr = (unsigned long)kiov->iov_base;
partlen = min(kiov->iov_len, len);
if (read)
ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
- (u64)kiov->iov_base,
+ daddr,
kiov->iov_len,
vr_idx);
else
ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
- (u64)kiov->iov_base,
+ daddr,
kiov->iov_len,
vr_idx);
if (ret) {
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 93be82fc338a..2ec5808ba464 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -616,8 +616,8 @@ irqreturn_t gru_intr_mblade(int irq, void *dev_id)
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
- gru_intr(0, blade);
- gru_intr(1, blade);
+ gru_intr(0, blade);
+ gru_intr(1, blade);
}
return IRQ_HANDLED;
}
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 869ec842729e..ad807d5a3141 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -34,7 +34,6 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -73,15 +72,26 @@ enum vmwballoon_capabilities {
VMW_BALLOON_BATCHED_CMDS = (1 << 2),
VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
+ VMW_BALLOON_64_BIT_TARGET = (1 << 5)
};
-#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS \
+#define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
| VMW_BALLOON_BATCHED_CMDS \
| VMW_BALLOON_BATCHED_2M_CMDS \
| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
+/*
+ * 64-bit targets are only supported in 64-bit
+ */
+#ifdef CONFIG_64BIT
+#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \
+ | VMW_BALLOON_64_BIT_TARGET)
+#else
+#define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON
+#endif
+
enum vmballoon_page_size_type {
VMW_BALLOON_4K_PAGE,
VMW_BALLOON_2M_PAGE,
@@ -602,8 +612,9 @@ static int vmballoon_send_get_target(struct vmballoon *b)
limit = totalram_pages();
- /* Ensure limit fits in 32-bits */
- if (limit != (u32)limit)
+ /* Ensure limit fits in 32-bits if 64-bit targets are not supported */
+ if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
+ limit != (u32)limit)
return -EINVAL;
status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
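The check above now rejects a page-count limit wider than 32 bits only when the host has not advertised VMW_BALLOON_64_BIT_TARGET. The cast-and-compare idiom it relies on is shown in this small standalone sketch (userspace C, values chosen for illustration):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool fits_in_u32(uint64_t limit)
{
	return limit == (uint32_t)limit;	/* cast and compare detects truncation */
}

int main(void)
{
	/* 2^32 pages of 4 KiB (16 TiB) does not fit in 32 bits */
	printf("%d %d\n", fits_in_u32(0xffffffffULL), fits_in_u32(1ULL << 32));
	return 0;
}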
@@ -1319,7 +1330,7 @@ static void vmballoon_reset(struct vmballoon *b)
vmballoon_pop(b);
if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
- return;
+ goto unlock;
if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
if (vmballoon_init_batching(b)) {
@@ -1330,7 +1341,7 @@ static void vmballoon_reset(struct vmballoon *b)
* The guest will retry in one second.
*/
vmballoon_send_start(b, 0);
- return;
+ goto unlock;
}
} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
vmballoon_deinit_batching(b);
@@ -1346,6 +1357,7 @@ static void vmballoon_reset(struct vmballoon *b)
if (vmballoon_send_guest_id(b))
pr_err("failed to send guest ID to the host\n");
+unlock:
up_write(&b->conf_sem);
}
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index b3fa738ae005..7824c7494916 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -330,7 +330,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
/*
* Register the notification bitmap with the host.
*/
-bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
{
int result;
struct vmci_notify_bm_set_msg bitmap_set_msg;
@@ -340,11 +340,14 @@ bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
VMCI_DG_HEADERSIZE;
- bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+ if (vmci_use_ppn64())
+ bitmap_set_msg.bitmap_ppn64 = bitmap_ppn;
+ else
+ bitmap_set_msg.bitmap_ppn32 = (u32) bitmap_ppn;
result = vmci_send_datagram(&bitmap_set_msg.hdr);
if (result != VMCI_SUCCESS) {
- pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+ pr_devel("Failed to register (PPN=%llu) as notification bitmap (error=%d)\n",
bitmap_ppn, result);
return false;
}
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
index e4c0b17486a5..410a21f8436f 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.h
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -45,7 +45,7 @@ struct dbell_cpt_state {
int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
-bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn);
void vmci_dbell_scan_notification_entries(u8 *bitmap);
#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index cee9e977d318..2fbf4a0ac657 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -54,4 +54,6 @@ void vmci_guest_exit(void);
bool vmci_guest_code_active(void);
u32 vmci_get_vm_context_id(void);
+bool vmci_use_ppn64(void);
+
#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index dad5abee656e..928708128177 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -64,6 +64,13 @@ struct vmci_guest_device {
dma_addr_t notification_base;
};
+static bool use_ppn64;
+
+bool vmci_use_ppn64(void)
+{
+ return use_ppn64;
+}
+
/* vmci_dev singleton device and supporting data*/
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
@@ -432,6 +439,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
struct vmci_guest_device *vmci_dev;
void __iomem *iobase;
unsigned int capabilities;
+ unsigned int caps_in_use;
unsigned long cmd;
int vmci_err;
int error;
@@ -496,6 +504,23 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
error = -ENXIO;
goto err_free_data_buffer;
}
+ caps_in_use = VMCI_CAPS_DATAGRAM;
+
+ /*
+ * Use 64-bit PPNs if the device supports them.
+ *
+ * There is no check for the return value of dma_set_mask_and_coherent
+ * since this driver can handle the default mask values if
+ * dma_set_mask_and_coherent fails.
+ */
+ if (capabilities & VMCI_CAPS_PPN64) {
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ use_ppn64 = true;
+ caps_in_use |= VMCI_CAPS_PPN64;
+ } else {
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ use_ppn64 = false;
+ }
/*
* If the hardware supports notifications, we will use that as
@@ -510,14 +535,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
"Unable to allocate notification bitmap\n");
} else {
memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
- capabilities |= VMCI_CAPS_NOTIFICATIONS;
+ caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
}
}
- dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
+ dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);
/* Let the host know which capabilities we intend to use. */
- iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
+ iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR);
/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
@@ -529,13 +554,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* Register notification bitmap with device if that capability is
* used.
*/
- if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
unsigned long bitmap_ppn =
vmci_dev->notification_base >> PAGE_SHIFT;
if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
dev_warn(&pdev->dev,
- "VMCI device unable to register notification bitmap with PPN 0x%x\n",
- (u32) bitmap_ppn);
+ "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
+ bitmap_ppn);
error = -ENXIO;
goto err_remove_vmci_dev_g;
}
@@ -611,7 +636,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
/* Enable specific interrupt bits. */
cmd = VMCI_IMR_DATAGRAM;
- if (capabilities & VMCI_CAPS_NOTIFICATIONS)
+ if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
cmd |= VMCI_IMR_NOTIFICATION;
iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 264f4ed8eef2..f5f1aac9d163 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -435,8 +435,8 @@ static int qp_alloc_ppn_set(void *prod_q,
void *cons_q,
u64 num_consume_pages, struct ppn_set *ppn_set)
{
- u32 *produce_ppns;
- u32 *consume_ppns;
+ u64 *produce_ppns;
+ u64 *consume_ppns;
struct vmci_queue *produce_q = prod_q;
struct vmci_queue *consume_q = cons_q;
u64 i;
@@ -462,31 +462,13 @@ static int qp_alloc_ppn_set(void *prod_q,
return VMCI_ERROR_NO_MEM;
}
- for (i = 0; i < num_produce_pages; i++) {
- unsigned long pfn;
-
+ for (i = 0; i < num_produce_pages; i++)
produce_ppns[i] =
produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
- pfn = produce_ppns[i];
-
- /* Fail allocation if PFN isn't supported by hypervisor. */
- if (sizeof(pfn) > sizeof(*produce_ppns)
- && pfn != produce_ppns[i])
- goto ppn_error;
- }
-
- for (i = 0; i < num_consume_pages; i++) {
- unsigned long pfn;
+ for (i = 0; i < num_consume_pages; i++)
consume_ppns[i] =
consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
- pfn = consume_ppns[i];
-
- /* Fail allocation if PFN isn't supported by hypervisor. */
- if (sizeof(pfn) > sizeof(*consume_ppns)
- && pfn != consume_ppns[i])
- goto ppn_error;
- }
ppn_set->num_produce_pages = num_produce_pages;
ppn_set->num_consume_pages = num_consume_pages;
@@ -494,11 +476,6 @@ static int qp_alloc_ppn_set(void *prod_q,
ppn_set->consume_ppns = consume_ppns;
ppn_set->initialized = true;
return VMCI_SUCCESS;
-
- ppn_error:
- kfree(produce_ppns);
- kfree(consume_ppns);
- return VMCI_ERROR_INVALID_ARGS;
}
/*
@@ -520,12 +497,28 @@ static void qp_free_ppn_set(struct ppn_set *ppn_set)
*/
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
- memcpy(call_buf, ppn_set->produce_ppns,
- ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
- memcpy(call_buf +
- ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
- ppn_set->consume_ppns,
- ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+ if (vmci_use_ppn64()) {
+ memcpy(call_buf, ppn_set->produce_ppns,
+ ppn_set->num_produce_pages *
+ sizeof(*ppn_set->produce_ppns));
+ memcpy(call_buf +
+ ppn_set->num_produce_pages *
+ sizeof(*ppn_set->produce_ppns),
+ ppn_set->consume_ppns,
+ ppn_set->num_consume_pages *
+ sizeof(*ppn_set->consume_ppns));
+ } else {
+ int i;
+ u32 *ppns = (u32 *) call_buf;
+
+ for (i = 0; i < ppn_set->num_produce_pages; i++)
+ ppns[i] = (u32) ppn_set->produce_ppns[i];
+
+ ppns = &ppns[ppn_set->num_produce_pages];
+
+ for (i = 0; i < ppn_set->num_consume_pages; i++)
+ ppns[i] = (u32) ppn_set->consume_ppns[i];
+ }
return VMCI_SUCCESS;
}
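A condensed sketch of the packing logic in qp_populate_ppn_set() above (standalone C, names and sizes illustrative): with 64-bit PPNs negotiated the array is copied verbatim, otherwise each 64-bit page number is narrowed to a 32-bit slot in the hypercall buffer.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

void pack_ppns(uint8_t *call_buf, const uint64_t *ppns, size_t n, int use_ppn64)
{
	size_t i;

	if (use_ppn64) {
		memcpy(call_buf, ppns, n * sizeof(*ppns));	/* 8 bytes per PPN */
		return;
	}
	for (i = 0; i < n; i++) {
		uint32_t ppn32 = (uint32_t)ppns[i];	/* narrowed, as in the !ppn64 branch */

		memcpy(call_buf + i * sizeof(ppn32), &ppn32, sizeof(ppn32));
	}
}

int main(void)
{
	uint64_t ppns[2] = { 0x12345, 0x67890 };
	uint8_t buf[16];

	pack_ppns(buf, ppns, 2, 0);	/* uses 2 x 4 bytes */
	pack_ppns(buf, ppns, 2, 1);	/* uses 2 x 8 bytes */
	return 0;
}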
@@ -951,13 +944,15 @@ static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
struct vmci_qp_alloc_msg *alloc_msg;
size_t msg_size;
+ size_t ppn_size;
int result;
if (!entry || entry->num_ppns <= 2)
return VMCI_ERROR_INVALID_ARGS;
+ ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
msg_size = sizeof(*alloc_msg) +
- (size_t) entry->num_ppns * sizeof(u32);
+ (size_t) entry->num_ppns * ppn_size;
alloc_msg = kmalloc(msg_size, GFP_KERNEL);
if (!alloc_msg)
return VMCI_ERROR_NO_MEM;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index ed177f04ef24..46c0b6c7bafb 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -28,8 +28,8 @@ typedef int (*vmci_event_release_cb) (void *client_data);
struct ppn_set {
u64 num_produce_pages;
u64 num_consume_pages;
- u32 *produce_ppns;
- u32 *consume_ppns;
+ u64 *produce_ppns;
+ u64 *consume_ppns;
bool initialized;
};
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index c49ff8970ce3..e071e28bca3f 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -53,6 +53,7 @@
#include <linux/ntb.h>
#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
/* PCI device IDs */
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
@@ -218,33 +219,4 @@ static inline int pdev_is_gen3(struct pci_dev *pdev)
return 0;
}
-#ifndef ioread64
-#ifdef readq
-#define ioread64 readq
-#else
-#define ioread64 _ioread64
-static inline u64 _ioread64(void __iomem *mmio)
-{
- u64 low, high;
-
- low = ioread32(mmio);
- high = ioread32(mmio + sizeof(u32));
- return low | (high << 32);
-}
-#endif
-#endif
-
-#ifndef iowrite64
-#ifdef writeq
-#define iowrite64 writeq
-#else
-#define iowrite64 _iowrite64
-static inline void _iowrite64(u64 val, void __iomem *mmio)
-{
- iowrite32(val, mmio);
- iowrite32(val >> 32, mmio + sizeof(u32));
-}
-#endif
-#endif
-
#endif
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f1eaa3c4d46a..f2df2d39c65b 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -13,13 +13,14 @@
*
*/
-#include <linux/switchtec.h>
-#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/delay.h>
#include <linux/kthread.h>
-#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/pci.h>
+#include <linux/switchtec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
@@ -36,35 +37,6 @@ module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
"Enable the use of the LUT based memory windows");
-#ifndef ioread64
-#ifdef readq
-#define ioread64 readq
-#else
-#define ioread64 _ioread64
-static inline u64 _ioread64(void __iomem *mmio)
-{
- u64 low, high;
-
- low = ioread32(mmio);
- high = ioread32(mmio + sizeof(u32));
- return low | (high << 32);
-}
-#endif
-#endif
-
-#ifndef iowrite64
-#ifdef writeq
-#define iowrite64 writeq
-#else
-#define iowrite64 _iowrite64
-static inline void _iowrite64(u64 val, void __iomem *mmio)
-{
- iowrite32(val, mmio);
- iowrite32(val >> 32, mmio + sizeof(u32));
-}
-#endif
-#endif
-
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 4ad846ceac7c..530d570724c9 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -26,7 +26,7 @@ config NVMEM_IMX_IIM
config NVMEM_IMX_OCOTP
tristate "i.MX6 On-Chip OTP Controller support"
- depends on SOC_IMX6 || COMPILE_TEST
+ depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST
depends on HAS_IOMEM
help
This is a driver for the On-Chip OTP Controller (OCOTP) available on
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 4159b3f41d79..a8097511582a 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -11,13 +11,14 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
/*
@@ -78,9 +79,9 @@ static struct otpc_map otp_map_v2 = {
};
struct otpc_priv {
- struct device *dev;
- void __iomem *base;
- struct otpc_map *map;
+ struct device *dev;
+ void __iomem *base;
+ const struct otpc_map *map;
struct nvmem_config *config;
};
@@ -237,16 +238,22 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
};
static const struct of_device_id bcm_otpc_dt_ids[] = {
- { .compatible = "brcm,ocotp" },
- { .compatible = "brcm,ocotp-v2" },
+ { .compatible = "brcm,ocotp", .data = &otp_map },
+ { .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+static const struct acpi_device_id bcm_otpc_acpi_ids[] = {
+ { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
+ { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids);
+
static int bcm_otpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *dn = dev->of_node;
struct resource *res;
struct otpc_priv *priv;
struct nvmem_device *nvmem;
@@ -257,14 +264,9 @@ static int bcm_otpc_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- if (of_device_is_compatible(dev->of_node, "brcm,ocotp"))
- priv->map = &otp_map;
- else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2"))
- priv->map = &otp_map_v2;
- else {
- dev_err(dev, "%s otpc config map not defined\n", __func__);
- return -EINVAL;
- }
+ priv->map = device_get_match_data(dev);
+ if (!priv->map)
+ return -ENODEV;
/* Get OTP base address register. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -281,7 +283,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
reset_start_bit(priv->base);
/* Read size of memory in words. */
- err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words);
+ err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words);
if (err) {
dev_err(dev, "size parameter not specified\n");
return -EINVAL;
@@ -294,7 +296,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
bcm_otpc_nvmem_config.dev = dev;
bcm_otpc_nvmem_config.priv = priv;
- if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) {
+ if (priv->map == &otp_map_v2) {
bcm_otpc_nvmem_config.word_size = 8;
bcm_otpc_nvmem_config.stride = 8;
}
@@ -315,6 +317,7 @@ static struct platform_driver bcm_otpc_driver = {
.driver = {
.name = "brcm-otpc",
.of_match_table = bcm_otpc_dt_ids,
+ .acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids),
},
};
module_platform_driver(bcm_otpc_driver);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f7301bb4ef3b..f24008b66826 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -525,12 +525,14 @@ out:
static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
- list_for_each_entry(cell, &nvmem->cells, node) {
- if (strcmp(cell_id, cell->name) == 0)
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (strcmp(cell_id, iter->name) == 0) {
+ cell = iter;
break;
+ }
}
mutex_unlock(&nvmem_mutex);
@@ -646,8 +648,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
config->name ? config->id : nvmem->id);
}
- nvmem->read_only = device_property_present(config->dev, "read-only") |
- config->read_only;
+ nvmem->read_only = device_property_present(config->dev, "read-only") ||
+ config->read_only || !nvmem->reg_write;
if (config->root_only)
nvmem->dev.groups = nvmem->read_only ?
@@ -686,9 +688,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
- rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
- if (rval)
- goto err_remove_cells;
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
@@ -809,6 +809,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
"could not increase module refcount for cell %s\n",
nvmem_dev_name(nvmem));
+ put_device(&nvmem->dev);
return ERR_PTR(-EINVAL);
}
@@ -819,6 +820,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
+ put_device(&nvmem->dev);
module_put(nvmem->owner);
kref_put(&nvmem->refcnt, nvmem_device_release);
}
@@ -837,13 +839,14 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
struct device_node *nvmem_np;
- int index;
+ int index = 0;
- index = of_property_match_string(np, "nvmem-names", id);
+ if (id)
+ index = of_property_match_string(np, "nvmem-names", id);
nvmem_np = of_parse_phandle(np, "nvmem", index);
if (!nvmem_np)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOENT);
return __nvmem_device_get(nvmem_np, NULL);
}
@@ -871,7 +874,7 @@ struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
}
- return nvmem_find(dev_name);
+ return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
@@ -972,7 +975,7 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if (IS_ERR(nvmem)) {
/* Provider may not be registered yet. */
cell = ERR_CAST(nvmem);
- goto out;
+ break;
}
cell = nvmem_find_cell_by_name(nvmem,
@@ -980,12 +983,11 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if (!cell) {
__nvmem_device_put(nvmem);
cell = ERR_PTR(-ENOENT);
- goto out;
}
+ break;
}
}
-out:
mutex_unlock(&nvmem_lookup_mutex);
return cell;
}
@@ -994,12 +996,14 @@ out:
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
- list_for_each_entry(cell, &nvmem->cells, node) {
- if (np == cell->np)
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (np == iter->np) {
+ cell = iter;
break;
+ }
}
mutex_unlock(&nvmem_mutex);
@@ -1031,7 +1035,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
cell_np = of_parse_phandle(np, "nvmem-cells", index);
if (!cell_np)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOENT);
nvmem_np = of_get_next_parent(cell_np);
if (!nvmem_np)
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index afb429a417fe..08a9b1ef8ae4 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -427,19 +427,32 @@ static const struct ocotp_params imx6ul_params = {
.set_timing = imx_ocotp_set_imx6_timing,
};
+static const struct ocotp_params imx6ull_params = {
+ .nregs = 64,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
static const struct ocotp_params imx7d_params = {
.nregs = 64,
.bank_address_words = 4,
.set_timing = imx_ocotp_set_imx7_timing,
};
+static const struct ocotp_params imx7ulp_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
{ .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
{ .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
+ { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params },
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
+ { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index 33185d8d82cf..c6ee21018d80 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -106,10 +106,12 @@ static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
{
struct sc27xx_efuse *efuse = context;
- u32 buf;
+ u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH;
+ u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
int ret;
- if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH)
+ if (blk_index > SC27XX_EFUSE_BLOCK_MAX ||
+ bytes > SC27XX_EFUSE_BLOCK_WIDTH)
return -EINVAL;
ret = sc27xx_efuse_lock(efuse);
@@ -133,7 +135,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
/* Set the block address to be read. */
ret = regmap_write(efuse->regmap,
efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
- offset & SC27XX_EFUSE_BLOCK_MASK);
+ blk_index & SC27XX_EFUSE_BLOCK_MASK);
if (ret)
goto disable_efuse;
@@ -171,8 +173,10 @@ disable_efuse:
unlock_efuse:
sc27xx_efuse_unlock(efuse);
- if (!ret)
+ if (!ret) {
+ buf >>= blk_offset;
memcpy(val, &buf, bytes);
+ }
return ret;
}
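The rework above converts the requested byte offset into a block index plus a bit shift within the 32-bit value read back from the block. A worked example, assuming purely for illustration a block width of 2 bytes:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_WIDTH	2	/* bytes per block -- assumed for this example */
#define BITS_PER_BYTE	8

int main(void)
{
	uint32_t offset = 5;
	uint32_t blk_index  = offset / BLOCK_WIDTH;			/* 2 */
	uint32_t blk_offset = (offset % BLOCK_WIDTH) * BITS_PER_BYTE;	/* 8 bits */
	uint32_t buf = 0xaabbccdd;	/* pretend block read-back */

	buf >>= blk_offset;		/* drop the bytes below the requested offset */
	printf("block %u, shift %u, first byte 0x%02x\n",
	       blk_index, blk_offset, buf & 0xff);
	return 0;
}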
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5484a46dafda..56dd83a45e55 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -213,10 +213,12 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
+ struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
+ memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -230,7 +232,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
+ dev = parport_register_dev_model(port, name, &par_cb, devnum);
parport_put_port(port);
if (!dev)
return NULL;
@@ -480,3 +482,31 @@ static int assign_addrs(struct parport *port)
kfree(deviceid);
return detected;
}
+
+static int daisy_drv_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+
+ if (strcmp(drv->name, "daisy_drv"))
+ return -ENODEV;
+ if (strcmp(par_dev->name, daisy_dev_name))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver daisy_driver = {
+ .name = "daisy_drv",
+ .probe = daisy_drv_probe,
+ .devmodel = true,
+};
+
+int daisy_drv_init(void)
+{
+ return parport_register_driver(&daisy_driver);
+}
+
+void daisy_drv_exit(void)
+{
+ parport_unregister_driver(&daisy_driver);
+}
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 9c8249f74479..6296dbb83d47 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
{
int i;
for (i = 0; i < NR_SUPERIOS; i++)
- if (superios[i].io != p->base)
+ if (superios[i].io == p->base)
return &superios[i];
return NULL;
}
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e035174ba205..e5e6a463a941 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open (devnum, "Device ID probe");
+ struct pardevice *dev = parport_open(devnum, daisy_dev_name);
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5dc53d420ca8..0171b8dbcdcd 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -137,11 +137,19 @@ static struct bus_type parport_bus_type = {
int parport_bus_init(void)
{
- return bus_register(&parport_bus_type);
+ int retval;
+
+ retval = bus_register(&parport_bus_type);
+ if (retval)
+ return retval;
+ daisy_drv_init();
+
+ return 0;
}
void parport_bus_exit(void)
{
+ daisy_drv_exit();
bus_unregister(&parport_bus_type);
}
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 479031aa4f88..74fdfa68d1f2 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -2,7 +2,7 @@ menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
depends on HAS_IOMEM
- ---help---
+ help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
@@ -12,7 +12,7 @@ if GOLDFISH
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
- ---help---
+ help
This is a virtual device to drive the QEMU pipe interface used by
the Goldfish Android Virtual Device.
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f38882f6f37d..8f9d9e9fa695 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1369,14 +1369,14 @@ config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
select SCSI_SPI_ATTRS
- select NVRAM
---help---
If you have an Atari with built-in NCR5380 SCSI controller (TT,
Falcon, ...) say Y to get it supported. Of course also, if you have
a compatible SCSI controller (e.g. for Medusa).
- To compile this driver as a module, choose M here: the
- module will be called atari_scsi.
+ To compile this driver as a module, choose M here: the module will
+ be called atari_scsi. If you also enable NVRAM support, the SCSI
+ host's ID is taken from the setting in TT RTC NVRAM.
This driver supports both styles of NCR integration into the
system: the TT style (separate DMA), and the Falcon style (via
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a503dc50c4f8..e809493d0d06 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -757,15 +757,17 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_hostid >= 0) {
atari_scsi_template.this_id = setup_hostid & 7;
- } else {
+ } else if (IS_REACHABLE(CONFIG_NVRAM)) {
/* Test if a host id is set in the NVRam */
- if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
- unsigned char b = nvram_read_byte(16);
+ if (ATARIHW_PRESENT(TT_CLK)) {
+ unsigned char b;
+ loff_t offset = 16;
+ ssize_t count = nvram_read(&b, 1, &offset);
/* Arbitration enabled? (for TOS)
* If yes, use configured host ID
*/
- if (b & 0x80)
+ if ((count == 1) && (b & 0x80))
atari_scsi_template.this_id = b & 7;
}
}
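The hunk above moves atari_scsi from nvram_check_checksum()/nvram_read_byte() to the nvram_read() interface while keeping the same decoding of NVRAM byte 16: bit 7 says whether arbitration (and hence the configured ID) should be used, and bits 0-2 carry the host ID. A standalone restatement of that decoding, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Returns true and fills *id when the NVRAM byte enables arbitration. */
static bool decode_scsi_host_id(unsigned char b, int *id)
{
	if (!(b & 0x80))
		return false;	/* arbitration disabled: keep the driver default */
	*id = b & 7;		/* host ID lives in the low three bits */
	return true;
}

int main(void)
{
	int id;

	if (decode_scsi_host_id(0x87, &id))
		printf("host ID from NVRAM: %d\n", id);	/* prints 7 */
	return 0;
}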
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 55eda5863a6b..b2f07d2043eb 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -21,7 +21,9 @@ static const struct slim_device_id *slim_match(const struct slim_device_id *id,
{
while (id->manf_id != 0 || id->prod_code != 0) {
if (id->manf_id == sbdev->e_addr.manf_id &&
- id->prod_code == sbdev->e_addr.prod_code)
+ id->prod_code == sbdev->e_addr.prod_code &&
+ id->dev_index == sbdev->e_addr.dev_index &&
+ id->instance == sbdev->e_addr.instance)
return id;
id++;
}
@@ -40,6 +42,23 @@ static int slim_device_match(struct device *dev, struct device_driver *drv)
return !!slim_match(sbdrv->id_table, sbdev);
}
+static void slim_device_update_status(struct slim_device *sbdev,
+ enum slim_device_status status)
+{
+ struct slim_driver *sbdrv;
+
+ if (sbdev->status == status)
+ return;
+
+ sbdev->status = status;
+ if (!sbdev->dev.driver)
+ return;
+
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ if (sbdrv->device_status)
+ sbdrv->device_status(sbdev, sbdev->status);
+}
+
static int slim_device_probe(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
@@ -53,8 +72,7 @@ static int slim_device_probe(struct device *dev)
/* try getting the logical address after probe */
ret = slim_get_logical_addr(sbdev);
if (!ret) {
- if (sbdrv->device_status)
- sbdrv->device_status(sbdev, sbdev->status);
+ slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
} else {
dev_err(&sbdev->dev, "Failed to get logical address\n");
ret = -EPROBE_DEFER;
@@ -256,6 +274,7 @@ int slim_register_controller(struct slim_controller *ctrl)
mutex_init(&ctrl->lock);
mutex_init(&ctrl->sched.m_reconf);
init_completion(&ctrl->sched.pause_comp);
+ spin_lock_init(&ctrl->txn_lock);
dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
ctrl->name, ctrl->dev);
@@ -295,23 +314,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
}
EXPORT_SYMBOL_GPL(slim_unregister_controller);
-static void slim_device_update_status(struct slim_device *sbdev,
- enum slim_device_status status)
-{
- struct slim_driver *sbdrv;
-
- if (sbdev->status == status)
- return;
-
- sbdev->status = status;
- if (!sbdev->dev.driver)
- return;
-
- sbdrv = to_slim_driver(sbdev->dev.driver);
- if (sbdrv->device_status)
- sbdrv->device_status(sbdev, sbdev->status);
-}
-
/**
* slim_report_absent() - Controller calls this function when a device
* reports absent, OR when the device cannot be communicated with
@@ -464,6 +466,7 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
sbdev->laddr = laddr;
sbdev->is_laddr_valid = true;
+ mutex_unlock(&ctrl->lock);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
@@ -471,6 +474,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
sbdev->e_addr.dev_index, sbdev->e_addr.instance);
+ return 0;
+
err:
mutex_unlock(&ctrl->lock);
return ret;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 131342280b46..a57698985f9c 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -491,10 +491,10 @@ static int uio_open(struct inode *inode, struct file *filep)
if (!idev->info) {
mutex_unlock(&idev->info_lock);
ret = -EINVAL;
- goto err_alloc_listener;
+ goto err_infoopen;
}
- if (idev->info && idev->info->open)
+ if (idev->info->open)
ret = idev->info->open(idev->info, inode);
mutex_unlock(&idev->info_lock);
if (ret)
@@ -635,7 +635,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
goto out;
}
- if (!idev->info || !idev->info->irq) {
+ if (!idev->info->irq) {
retval = -EIO;
goto out;
}
@@ -940,9 +940,12 @@ int __uio_register_device(struct module *owner,
atomic_set(&idev->event, 0);
ret = uio_get_minor(idev);
- if (ret)
+ if (ret) {
+ kfree(idev);
return ret;
+ }
+ device_initialize(&idev->dev);
idev->dev.devt = MKDEV(uio_major, idev->minor);
idev->dev.class = &uio_class;
idev->dev.parent = parent;
@@ -953,7 +956,7 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_device_create;
- ret = device_register(&idev->dev);
+ ret = device_add(&idev->dev);
if (ret)
goto err_device_create;
@@ -985,9 +988,10 @@ int __uio_register_device(struct module *owner,
err_request_irq:
uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
- device_unregister(&idev->dev);
+ device_del(&idev->dev);
err_device_create:
uio_free_minor(idev);
+ put_device(&idev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
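The uio changes above adopt the usual two-step struct device lifecycle: device_initialize() plus device_add() instead of device_register(), with put_device() on the error path so the embedded kobject's release callback frees the allocation. A minimal sketch of that pattern, with invented names and a simplified error path:

#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
	struct device dev;
};

static void my_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct my_dev, dev));
}

static int my_dev_register(struct device *parent)
{
	struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);
	int ret;

	if (!md)
		return -ENOMEM;

	device_initialize(&md->dev);		/* kobject now owns the memory */
	md->dev.parent = parent;
	md->dev.release = my_dev_release;

	ret = dev_set_name(&md->dev, "mydev");
	if (ret)
		goto err_put;

	ret = device_add(&md->dev);
	if (ret)
		goto err_put;
	return 0;

err_put:
	put_device(&md->dev);	/* frees md via my_dev_release(), not kfree() */
	return ret;
}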
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 8773e373ffe5..dde5cbb27178 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -39,6 +39,22 @@ to_uio_pci_generic_dev(struct uio_info *info)
return container_of(info, struct uio_pci_generic_dev, info);
}
+static int release(struct uio_info *info, struct inode *inode)
+{
+ struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
+
+ /*
+ * This driver is insecure when used with devices doing DMA, but some
+ * people (mis)use it with such devices.
+ * Let's at least make sure DMA isn't left enabled after the userspace
+ * driver closes the fd.
+ * Note that there's a non-zero chance doing this will wedge the device
+ * at least until reset.
+ */
+ pci_clear_master(gdev->pdev);
+ return 0;
+}
+
/* Interrupt handler. Read/modify/write the command register to disable
* the interrupt. */
static irqreturn_t irqhandler(int irq, struct uio_info *info)
@@ -78,6 +94,7 @@ static int probe(struct pci_dev *pdev,
gdev->info.name = "uio_pci_generic";
gdev->info.version = DRIVER_VERSION;
+ gdev->info.release = release;
gdev->pdev = pdev;
if (pdev->irq) {
gdev->info.irq = pdev->irq;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index ae7712c9687a..58a9590c9db6 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -536,7 +536,7 @@ config FB_IMSTT
bool "IMS Twin Turbo display support"
depends on (FB = y) && PCI
select FB_CFB_IMAGEBLIT
- select FB_MACMODES if PPC
+ select FB_MACMODES if PPC_PMAC
help
The IMS Twin Turbo is a PCI-based frame buffer card bundled with
many Macintosh and compatible computers.
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 9cb0ef7ac29e..7af8db28bb80 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -411,35 +411,23 @@ static int __init init_control(struct fb_info_control *p)
full = p->total_vram == 0x400000;
/* Try to pick a video mode out of NVRAM if we have one. */
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM) {
+ cmode = default_cmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
cmode = nvram_read_byte(NV_CMODE);
- if(cmode < CMODE_8 || cmode > CMODE_32)
- cmode = CMODE_8;
- } else
-#endif
- cmode=default_cmode;
-#ifdef CONFIG_NVRAM
- if (default_vmode == VMODE_NVRAM) {
+ if (cmode < CMODE_8 || cmode > CMODE_32)
+ cmode = CMODE_8;
+
+ vmode = default_vmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
vmode = nvram_read_byte(NV_VMODE);
- if (vmode < 1 || vmode > VMODE_MAX ||
- control_mac_modes[vmode - 1].m[full] < cmode) {
- sense = read_control_sense(p);
- printk("Monitor sense value = 0x%x, ", sense);
- vmode = mac_map_monitor_sense(sense);
- if (control_mac_modes[vmode - 1].m[full] < cmode)
- vmode = VMODE_640_480_60;
- }
- } else
-#endif
- {
- vmode=default_vmode;
- if (control_mac_modes[vmode - 1].m[full] < cmode) {
- if (cmode > CMODE_8)
- cmode--;
- else
- vmode = VMODE_640_480_60;
- }
+ if (vmode < 1 || vmode > VMODE_MAX ||
+ control_mac_modes[vmode - 1].m[full] < cmode) {
+ sense = read_control_sense(p);
+ printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
+ vmode = mac_map_monitor_sense(sense);
+ if (control_mac_modes[vmode - 1].m[full] < 0)
+ vmode = VMODE_640_480_60;
+ cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
}
/* Initialize info structure */
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 901ca4ed10e9..5d9670daf60e 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -30,9 +30,8 @@
#include <asm/io.h>
#include <linux/uaccess.h>
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
#include <linux/nvram.h>
-#include <asm/prom.h>
#include "macmodes.h"
#endif
@@ -327,14 +326,13 @@ enum {
TVP = 1
};
-#define USE_NV_MODES 1
#define INIT_BPP 8
#define INIT_XRES 640
#define INIT_YRES 480
static int inverse = 0;
static char fontname[40] __initdata = { 0 };
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
static signed char init_vmode = -1, init_cmode = -1;
#endif
@@ -1390,8 +1388,8 @@ static void init_imstt(struct fb_info *info)
}
}
-#if USE_NV_MODES && defined(CONFIG_PPC32)
- {
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+ if (IS_REACHABLE(CONFIG_NVRAM) && machine_is(powermac)) {
int vmode = init_vmode, cmode = init_cmode;
if (vmode == -1) {
@@ -1409,12 +1407,13 @@ static void init_imstt(struct fb_info *info)
info->var.yres = info->var.yres_virtual = INIT_YRES;
info->var.bits_per_pixel = INIT_BPP;
}
- }
-#else
- info->var.xres = info->var.xres_virtual = INIT_XRES;
- info->var.yres = info->var.yres_virtual = INIT_YRES;
- info->var.bits_per_pixel = INIT_BPP;
+ } else
#endif
+ {
+ info->var.xres = info->var.xres_virtual = INIT_XRES;
+ info->var.yres = info->var.yres_virtual = INIT_YRES;
+ info->var.bits_per_pixel = INIT_BPP;
+ }
if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
@@ -1565,7 +1564,7 @@ imsttfb_setup(char *options)
inverse = 1;
fb_invert_cmaps();
}
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
else if (!strncmp(this_opt, "vmode:", 6)) {
int vmode = simple_strtoul(this_opt+6, NULL, 0);
if (vmode > 0 && vmode <= VMODE_MAX)
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 838869c6490c..d11b5e6210ed 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -111,12 +111,12 @@
#include "matroxfb_g450.h"
#include <linux/matroxfb.h>
#include <linux/interrupt.h>
+#include <linux/nvram.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
-unsigned char nvram_read_byte(int);
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
#endif
@@ -1872,10 +1872,11 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
#ifndef MODULE
if (machine_is(powermac)) {
struct fb_var_screeninfo var;
+
if (default_vmode <= 0 || default_vmode > VMODE_MAX)
default_vmode = VMODE_640_480_60;
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM)
+#if defined(CONFIG_PPC32)
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
#endif
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index bf6b7fb83cf4..76f299375a00 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -345,23 +345,18 @@ static int platinum_init_fb(struct fb_info *info)
sense = read_platinum_sense(pinfo);
printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense);
- if (default_vmode == VMODE_NVRAM) {
-#ifdef CONFIG_NVRAM
+
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
default_vmode = nvram_read_byte(NV_VMODE);
- if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
- !platinum_reg_init[default_vmode-1])
-#endif
- default_vmode = VMODE_CHOOSE;
- }
- if (default_vmode == VMODE_CHOOSE) {
+ if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
+ !platinum_reg_init[default_vmode - 1]) {
default_vmode = mac_map_monitor_sense(sense);
+ if (!platinum_reg_init[default_vmode - 1])
+ default_vmode = VMODE_640_480_60;
}
- if (default_vmode <= 0 || default_vmode > VMODE_MAX)
- default_vmode = VMODE_640_480_60;
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM)
+
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
-#endif
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
/*
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index d51c3a8009cb..e04fde9c1fcd 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -63,15 +63,8 @@
#include "macmodes.h"
#include "valkyriefb.h"
-#ifdef CONFIG_MAC
-/* We don't yet have functions to read the PRAM... perhaps we can
- adapt them from the PPC code? */
-static int default_vmode = VMODE_CHOOSE;
-static int default_cmode = CMODE_8;
-#else
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
-#endif
struct fb_par_valkyrie {
int vmode, cmode;
@@ -283,24 +276,21 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense);
/* Try to pick a video mode out of NVRAM if we have one. */
-#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM)
- if (default_vmode == VMODE_NVRAM) {
+#ifdef CONFIG_PPC_PMAC
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
default_vmode = nvram_read_byte(NV_VMODE);
- if (default_vmode <= 0
- || default_vmode > VMODE_MAX
- || !valkyrie_reg_init[default_vmode - 1])
- default_vmode = VMODE_CHOOSE;
- }
#endif
- if (default_vmode == VMODE_CHOOSE)
+ if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
+ !valkyrie_reg_init[default_vmode - 1]) {
default_vmode = mac_map_monitor_sense(p->sense);
- if (!valkyrie_reg_init[default_vmode - 1])
- default_vmode = VMODE_640_480_67;
-#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM)
- if (default_cmode == CMODE_NVRAM)
+ if (!valkyrie_reg_init[default_vmode - 1])
+ default_vmode = VMODE_640_480_67;
+ }
+
+#ifdef CONFIG_PPC_PMAC
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
#endif
-
/*
* Reduce the pixel size if we don't have enough VRAM or bandwidth.
*/
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 1475ed5ffcde..df7d09409efe 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1484,8 +1484,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
#ifdef CONFIG_COMPAT
case VBG_IOCTL_HGCM_CALL_32(0):
f32bit = true;
- /* Fall through */
#endif
+ /* Fall through */
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 5b63b94ef6b5..a008f504a2d0 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -36,6 +36,17 @@ extern u64 ioread64(void __iomem *);
extern u64 ioread64be(void __iomem *);
#endif
+#ifdef readq
+#define ioread64_lo_hi ioread64_lo_hi
+#define ioread64_hi_lo ioread64_hi_lo
+#define ioread64be_lo_hi ioread64be_lo_hi
+#define ioread64be_hi_lo ioread64be_hi_lo
+extern u64 ioread64_lo_hi(void __iomem *addr);
+extern u64 ioread64_hi_lo(void __iomem *addr);
+extern u64 ioread64be_lo_hi(void __iomem *addr);
+extern u64 ioread64be_hi_lo(void __iomem *addr);
+#endif
+
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
@@ -46,6 +57,17 @@ extern void iowrite64(u64, void __iomem *);
extern void iowrite64be(u64, void __iomem *);
#endif
+#ifdef writeq
+#define iowrite64_lo_hi iowrite64_lo_hi
+#define iowrite64_hi_lo iowrite64_hi_lo
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
+extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
+#endif
+
/*
* "string" versions of the above. Note that they
* use native byte ordering for the accesses (on
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 4923b00328c1..93a386be38fa 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -5,6 +5,7 @@
#define _DRM_AUDIO_COMPONENT_H_
struct drm_audio_component;
+struct device;
/**
* struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index a6de09c5e47f..c21682f76cd3 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -250,4 +250,22 @@ struct hdcp2_dp_errata_stream_type {
#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2))
#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+/*
+ * Helper functions to convert 24bit big endian hdcp sequence number to
+ * host format and back
+ */
+static inline
+u32 drm_hdcp2_seq_num_to_u32(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
+{
+ return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16);
+}
+
+static inline
+void drm_hdcp2_u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
+{
+ seq_num[0] = val >> 16;
+ seq_num[1] = val >> 8;
+ seq_num[2] = val;
+}
+
#endif
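
A short sketch (function name is hypothetical) of how the two helpers above are meant to pair up: converting a host u32 to the 3-byte big-endian wire format and back recovers the low 24 bits.

#include <linux/types.h>
#include <drm/drm_hdcp.h>

static bool example_seq_num_round_trip(u32 val)
{
        u8 seq_num[HDCP_2_2_SEQ_NUM_LEN];

        drm_hdcp2_u32_to_seq_num(seq_num, val);
        /* Only the low 24 bits survive the conversion. */
        return drm_hdcp2_seq_num_to_u32(seq_num) == (val & 0xffffff);
}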
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index fca22d463e1b..dcb95bd9dee6 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -26,6 +26,11 @@
#include "drm_audio_component.h"
+enum i915_component_type {
+ I915_COMPONENT_AUDIO = 1,
+ I915_COMPONENT_HDCP,
+};
+
/* MAX_PORT is the number of port
* It must be sync with I915_MAX_PORTS defined i915_drv.h
*/
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c44703f471b3..7523e9a7b6e2 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -100,4 +100,19 @@ extern struct resource intel_graphics_stolen_res;
#define INTEL_GEN11_BSM_DW1 0xc4
#define INTEL_BSM_MASK (-(1u << 20))
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
new file mode 100644
index 000000000000..8c344255146a
--- /dev/null
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0+) */
+/*
+ * Copyright © 2017-2018 Intel Corporation
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#ifndef _I915_MEI_HDCP_INTERFACE_H_
+#define _I915_MEI_HDCP_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <drm/drm_hdcp.h>
+#include <drm/i915_drm.h>
+
+/**
+ * enum hdcp_port_type - HDCP port implementation type defined by ME FW
+ * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
+ * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
+ * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
+ * (HDMI 2.0) solution
+ * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
+ * solution
+ */
+enum hdcp_port_type {
+ HDCP_PORT_TYPE_INVALID,
+ HDCP_PORT_TYPE_INTEGRATED,
+ HDCP_PORT_TYPE_LSPCON,
+ HDCP_PORT_TYPE_CPDP
+};
+
+/**
+ * enum hdcp_wired_protocol - HDCP adaptation used on the port
+ * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
+ * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
+ * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
+ */
+enum hdcp_wired_protocol {
+ HDCP_PROTOCOL_INVALID,
+ HDCP_PROTOCOL_HDMI,
+ HDCP_PROTOCOL_DP
+};
+
+/**
+ * struct hdcp_port_data - intel specific HDCP port data
+ * @port: port index as per I915
+ * @port_type: HDCP port type as per ME FW classification
+ * @protocol: HDCP adaptation as per ME FW
+ * @k: Number of streams transmitted on a port. Only on DP MST this is != 1
+ * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ * over re-Auth has to be triggered.
+ * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
+ * streams
+ */
+struct hdcp_port_data {
+ enum port port;
+ u8 port_type;
+ u8 protocol;
+ u16 k;
+ u32 seq_num_m;
+ struct hdcp2_streamid_type *streams;
+};
+
+/**
+ * struct i915_hdcp_component_ops- ops for HDCP2.2 services.
+ * @owner: Module providing the ops
+ * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
+ * And Prepare AKE_Init.
+ * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
+ * AKE_Send_Cert and prepare
+ *				    AKE_Stored_Km/AKE_No_Stored_Km
+ * @verify_hprime: Verify AKE_Send_H_prime
+ * @store_pairing_info: Store pairing info received
+ * @initiate_locality_check: Prepare LC_Init
+ * @verify_lprime: Verify lprime
+ * @get_session_key: Prepare SKE_Send_Eks
+ * @repeater_check_flow_prepare_ack: Validate the Downstream topology
+ * and prepare rep_ack
+ * @verify_mprime: Verify mprime
+ * @enable_hdcp_authentication: Mark a port as authenticated.
+ * @close_hdcp_session: Close the Wired HDCP Tx session per port.
+ * This also disables the authenticated state of the port.
+ */
+struct i915_hdcp_component_ops {
+ /**
+ * @owner: mei_hdcp module
+ */
+ struct module *owner;
+
+ int (*initiate_hdcp2_session)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data);
+ int (*verify_receiver_cert_prepare_km)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert
+ *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz);
+ int (*verify_hprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime);
+ int (*store_pairing_info)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info
+ *pairing_info);
+ int (*initiate_locality_check)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data);
+ int (*verify_lprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime);
+ int (*get_session_key)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data);
+ int (*repeater_check_flow_prepare_ack)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack);
+ int (*verify_mprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready);
+ int (*enable_hdcp_authentication)(struct device *dev,
+ struct hdcp_port_data *data);
+ int (*close_hdcp_session)(struct device *dev,
+ struct hdcp_port_data *data);
+};
+
+/**
+ * struct i915_hdcp_comp_master - Used for communication between i915
+ * and mei_hdcp drivers for the HDCP2.2 services
+ * @mei_dev: device that provides the HDCP2.2 service from the MEI bus.
+ * @ops: Ops implemented by the mei_hdcp driver, used by the i915 driver.
+ */
+struct i915_hdcp_comp_master {
+ struct device *mei_dev;
+ const struct i915_hdcp_component_ops *ops;
+
+ /* To protect the above members. */
+ struct mutex mutex;
+};
+
+#endif /* _I915_MEI_HDCP_INTERFACE_H_ */
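
A hedged caller-side sketch, assuming the i915 usage of this interface: the mutex in struct i915_hdcp_comp_master guards the component pointers, so an op is dereferenced only while the lock is held. The function name is illustrative.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <drm/i915_mei_hdcp_interface.h>

static int example_start_session(struct i915_hdcp_comp_master *comp,
                                 struct hdcp_port_data *data,
                                 struct hdcp2_ake_init *ake)
{
        int ret = -ENODEV;

        mutex_lock(&comp->mutex);
        if (comp->ops && comp->mei_dev)
                ret = comp->ops->initiate_hdcp2_session(comp->mei_dev,
                                                        data, ake);
        mutex_unlock(&comp->mutex);

        return ret;
}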
diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h
new file mode 100644
index 000000000000..7b2393be7361
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm845.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SDM845 interconnect IDs
+ *
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_BLSP_1 1
+#define MASTER_TSIF 2
+#define MASTER_SDCC_2 3
+#define MASTER_SDCC_4 4
+#define MASTER_UFS_CARD 5
+#define MASTER_UFS_MEM 6
+#define MASTER_PCIE_0 7
+#define MASTER_A2NOC_CFG 8
+#define MASTER_QDSS_BAM 9
+#define MASTER_BLSP_2 10
+#define MASTER_CNOC_A2NOC 11
+#define MASTER_CRYPTO 12
+#define MASTER_IPA 13
+#define MASTER_PCIE_1 14
+#define MASTER_QDSS_ETR 15
+#define MASTER_USB3_0 16
+#define MASTER_USB3_1 17
+#define MASTER_CAMNOC_HF0_UNCOMP 18
+#define MASTER_CAMNOC_HF1_UNCOMP 19
+#define MASTER_CAMNOC_SF_UNCOMP 20
+#define MASTER_SPDM 21
+#define MASTER_TIC 22
+#define MASTER_SNOC_CNOC 23
+#define MASTER_QDSS_DAP 24
+#define MASTER_CNOC_DC_NOC 25
+#define MASTER_APPSS_PROC 26
+#define MASTER_GNOC_CFG 27
+#define MASTER_LLCC 28
+#define MASTER_TCU_0 29
+#define MASTER_MEM_NOC_CFG 30
+#define MASTER_GNOC_MEM_NOC 31
+#define MASTER_MNOC_HF_MEM_NOC 32
+#define MASTER_MNOC_SF_MEM_NOC 33
+#define MASTER_SNOC_GC_MEM_NOC 34
+#define MASTER_SNOC_SF_MEM_NOC 35
+#define MASTER_GFX3D 36
+#define MASTER_CNOC_MNOC_CFG 37
+#define MASTER_CAMNOC_HF0 38
+#define MASTER_CAMNOC_HF1 39
+#define MASTER_CAMNOC_SF 40
+#define MASTER_MDP0 41
+#define MASTER_MDP1 42
+#define MASTER_ROTATOR 43
+#define MASTER_VIDEO_P0 44
+#define MASTER_VIDEO_P1 45
+#define MASTER_VIDEO_PROC 46
+#define MASTER_SNOC_CFG 47
+#define MASTER_A1NOC_SNOC 48
+#define MASTER_A2NOC_SNOC 49
+#define MASTER_GNOC_SNOC 50
+#define MASTER_MEM_NOC_SNOC 51
+#define MASTER_ANOC_PCIE_SNOC 52
+#define MASTER_PIMEM 53
+#define MASTER_GIC 54
+#define SLAVE_A1NOC_SNOC 55
+#define SLAVE_SERVICE_A1NOC 56
+#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57
+#define SLAVE_A2NOC_SNOC 58
+#define SLAVE_ANOC_PCIE_SNOC 59
+#define SLAVE_SERVICE_A2NOC 60
+#define SLAVE_CAMNOC_UNCOMP 61
+#define SLAVE_A1NOC_CFG 62
+#define SLAVE_A2NOC_CFG 63
+#define SLAVE_AOP 64
+#define SLAVE_AOSS 65
+#define SLAVE_CAMERA_CFG 66
+#define SLAVE_CLK_CTL 67
+#define SLAVE_CDSP_CFG 68
+#define SLAVE_RBCPR_CX_CFG 69
+#define SLAVE_CRYPTO_0_CFG 70
+#define SLAVE_DCC_CFG 71
+#define SLAVE_CNOC_DDRSS 72
+#define SLAVE_DISPLAY_CFG 73
+#define SLAVE_GLM 74
+#define SLAVE_GFX3D_CFG 75
+#define SLAVE_IMEM_CFG 76
+#define SLAVE_IPA_CFG 77
+#define SLAVE_CNOC_MNOC_CFG 78
+#define SLAVE_PCIE_0_CFG 79
+#define SLAVE_PCIE_1_CFG 80
+#define SLAVE_PDM 81
+#define SLAVE_SOUTH_PHY_CFG 82
+#define SLAVE_PIMEM_CFG 83
+#define SLAVE_PRNG 84
+#define SLAVE_QDSS_CFG 85
+#define SLAVE_BLSP_2 86
+#define SLAVE_BLSP_1 87
+#define SLAVE_SDCC_2 88
+#define SLAVE_SDCC_4 89
+#define SLAVE_SNOC_CFG 90
+#define SLAVE_SPDM_WRAPPER 91
+#define SLAVE_SPSS_CFG 92
+#define SLAVE_TCSR 93
+#define SLAVE_TLMM_NORTH 94
+#define SLAVE_TLMM_SOUTH 95
+#define SLAVE_TSIF 96
+#define SLAVE_UFS_CARD_CFG 97
+#define SLAVE_UFS_MEM_CFG 98
+#define SLAVE_USB3_0 99
+#define SLAVE_USB3_1 100
+#define SLAVE_VENUS_CFG 101
+#define SLAVE_VSENSE_CTRL_CFG 102
+#define SLAVE_CNOC_A2NOC 103
+#define SLAVE_SERVICE_CNOC 104
+#define SLAVE_LLCC_CFG 105
+#define SLAVE_MEM_NOC_CFG 106
+#define SLAVE_GNOC_SNOC 107
+#define SLAVE_GNOC_MEM_NOC 108
+#define SLAVE_SERVICE_GNOC 109
+#define SLAVE_EBI1 110
+#define SLAVE_MSS_PROC_MS_MPU_CFG 111
+#define SLAVE_MEM_NOC_GNOC 112
+#define SLAVE_LLCC 113
+#define SLAVE_MEM_NOC_SNOC 114
+#define SLAVE_SERVICE_MEM_NOC 115
+#define SLAVE_MNOC_SF_MEM_NOC 116
+#define SLAVE_MNOC_HF_MEM_NOC 117
+#define SLAVE_SERVICE_MNOC 118
+#define SLAVE_APPSS 119
+#define SLAVE_SNOC_CNOC 120
+#define SLAVE_SNOC_MEM_NOC_GC 121
+#define SLAVE_SNOC_MEM_NOC_SF 122
+#define SLAVE_IMEM 123
+#define SLAVE_PCIE_0 124
+#define SLAVE_PCIE_1 125
+#define SLAVE_PIMEM 126
+#define SLAVE_SERVICE_SNOC 127
+#define SLAVE_QDSS_STM 128
+#define SLAVE_TCU 129
+
+#endif
diff --git a/include/linux/component.h b/include/linux/component.h
index e71fbbbc74e2..30bcc7e590eb 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -4,16 +4,38 @@
#include <linux/stddef.h>
+
struct device;
+/**
+ * struct component_ops - callbacks for component drivers
+ *
+ * Components are registered with component_add() and unregistered with
+ * component_del().
+ */
struct component_ops {
+ /**
+ * @bind:
+ *
+ * Called through component_bind_all() when the aggregate driver is
+ * ready to bind the overall driver.
+ */
int (*bind)(struct device *comp, struct device *master,
void *master_data);
+ /**
+ * @unbind:
+ *
+ * Called through component_unbind_all() when the aggregate driver is
+	 * ready to unbind the overall driver, or when component_bind_all() fails
+ * part-ways through and needs to unbind some already bound components.
+ */
void (*unbind)(struct device *comp, struct device *master,
void *master_data);
};
int component_add(struct device *, const struct component_ops *);
+int component_add_typed(struct device *dev, const struct component_ops *ops,
+ int subcomponent);
void component_del(struct device *, const struct component_ops *);
int component_bind_all(struct device *master, void *master_data);
@@ -21,8 +43,42 @@ void component_unbind_all(struct device *master, void *master_data);
struct master;
+/**
+ * struct component_master_ops - callback for the aggregate driver
+ *
+ * Aggregate drivers are registered with component_master_add_with_match() and
+ * unregistered with component_master_del().
+ */
struct component_master_ops {
+ /**
+ * @bind:
+ *
+	 * Called when all components of the aggregate driver, as specified in
+ * the match list passed to component_master_add_with_match(), are
+ * ready. Usually there are 3 steps to bind an aggregate driver:
+ *
+ * 1. Allocate a structure for the aggregate driver.
+ *
+ * 2. Bind all components to the aggregate driver by calling
+ * component_bind_all() with the aggregate driver structure as opaque
+ * pointer data.
+ *
+ * 3. Register the aggregate driver with the subsystem to publish its
+ * interfaces.
+ *
+ * Note that the lifetime of the aggregate driver does not align with
+ * any of the underlying &struct device instances. Therefore devm cannot
+ * be used and all resources acquired or allocated in this callback must
+ * be explicitly released in the @unbind callback.
+ */
int (*bind)(struct device *master);
+ /**
+ * @unbind:
+ *
+ * Called when either the aggregate driver, using
+ * component_master_del(), or one of its components, using
+ * component_del(), is unregistered.
+ */
void (*unbind)(struct device *master);
};
@@ -37,7 +93,27 @@ void component_match_add_release(struct device *master,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data);
+void component_match_add_typed(struct device *master,
+ struct component_match **matchptr,
+ int (*compare_typed)(struct device *, int, void *), void *compare_data);
+/**
+ * component_match_add - add a component match
+ * @master: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @compare: compare function to match against all components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @master
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions.
+ *
+ * See also component_match_add_release() and component_match_add_typed().
+ */
static inline void component_match_add(struct device *master,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
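
A hedged sketch of the aggregate-driver flow documented above; all example_* names and the OF-node compare callback are illustrative, and the aggregate state allocation is omitted (pass it as the opaque data in real code).

#include <linux/component.h>
#include <linux/device.h>
#include <linux/of.h>

static int example_compare(struct device *dev, void *data)
{
        return dev->of_node == data;    /* match a component by its OF node */
}

static int example_master_bind(struct device *dev)
{
        /* Bind every matched component; they see the opaque data (NULL here). */
        return component_bind_all(dev, NULL);
}

static void example_master_unbind(struct device *dev)
{
        component_unbind_all(dev, NULL);
}

static const struct component_master_ops example_master_ops = {
        .bind   = example_master_bind,
        .unbind = example_master_unbind,
};

static int example_master_probe(struct device *dev, struct device_node *child)
{
        struct component_match *match = NULL;   /* must start out NULL */

        component_match_add(dev, &match, example_compare, child);
        return component_master_add_with_match(dev, &example_master_ops, match);
}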
diff --git a/include/linux/gnss.h b/include/linux/gnss.h
index 43546977098c..36968a0f33e8 100644
--- a/include/linux/gnss.h
+++ b/include/linux/gnss.h
@@ -22,6 +22,7 @@ enum gnss_type {
GNSS_TYPE_NMEA = 0,
GNSS_TYPE_SIRF,
GNSS_TYPE_UBX,
+ GNSS_TYPE_MTK,
GNSS_TYPE_COUNT
};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index dcb6977afce9..64698ec8f2ac 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -222,8 +222,8 @@ static inline u32 hv_get_avail_to_write_percent(
* struct contains the fundamental information about an offer.
*/
struct vmbus_channel_offer {
- uuid_le if_type;
- uuid_le if_instance;
+ guid_t if_type;
+ guid_t if_instance;
/*
* These two fields are not currently used.
@@ -614,8 +614,8 @@ struct vmbus_channel_initiate_contact {
/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
struct vmbus_channel_message_header header;
- uuid_le guest_endpoint_id;
- uuid_le host_service_id;
+ guid_t guest_endpoint_id;
+ guid_t host_service_id;
} __packed;
struct vmbus_channel_version_response {
@@ -714,7 +714,7 @@ enum vmbus_device_type {
struct vmbus_device {
u16 dev_type;
- uuid_le guid;
+ guid_t guid;
bool perf_device;
};
@@ -751,6 +751,19 @@ struct vmbus_channel {
u64 interrupts; /* Host to Guest interrupts */
u64 sig_events; /* Guest to Host events */
+ /*
+ * Guest to host interrupts caused by the outbound ring buffer changing
+ * from empty to not empty.
+ */
+ u64 intr_out_empty;
+
+ /*
+ * Indicates that a full outbound ring buffer was encountered. The flag
+ * is set to true when a full outbound ring buffer is encountered and
+ * set to false when a write to the outbound ring buffer is completed.
+ */
+ bool out_full_flag;
+
/* Channel callback's invoked in softirq context */
struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
@@ -903,6 +916,24 @@ struct vmbus_channel {
* vmbus_connection.work_queue and hang: see vmbus_process_offer().
*/
struct work_struct add_channel_work;
+
+ /*
+ * Guest to host interrupts caused by the inbound ring buffer changing
+ * from full to not full while a packet is waiting.
+ */
+ u64 intr_in_full;
+
+ /*
+ * The total number of write operations that encountered a full
+ * outbound ring buffer.
+ */
+ u64 out_full_total;
+
+ /*
+ * The number of write operations that were the first to encounter a
+ * full outbound ring buffer.
+ */
+ u64 out_full_first;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -936,6 +967,21 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
u32 size)
{
+ unsigned long flags;
+
+ if (size) {
+ spin_lock_irqsave(&c->outbound.ring_lock, flags);
+ ++c->out_full_total;
+
+ if (!c->out_full_flag) {
+ ++c->out_full_first;
+ c->out_full_flag = true;
+ }
+ spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
+ } else {
+ c->out_full_flag = false;
+ }
+
c->outbound.ring_buffer->pending_send_sz = size;
}
@@ -1096,7 +1142,7 @@ struct hv_driver {
bool hvsock;
/* the device type supported by this driver */
- uuid_le dev_type;
+ guid_t dev_type;
const struct hv_vmbus_device_id *id_table;
struct device_driver driver;
@@ -1116,10 +1162,10 @@ struct hv_driver {
/* Base device object */
struct hv_device {
/* the device type id of this device */
- uuid_le dev_type;
+ guid_t dev_type;
/* the device instance id of this device */
- uuid_le dev_instance;
+ guid_t dev_instance;
u16 vendor_id;
u16 device_id;
@@ -1188,102 +1234,102 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
* {f8615163-df3e-46c5-913f-f2d2f965ed0e}
*/
#define HV_NIC_GUID \
- .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
- 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
+ .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
+ 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
/*
* IDE GUID
* {32412632-86cb-44a2-9b5c-50d1417354f5}
*/
#define HV_IDE_GUID \
- .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
- 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
+ .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
+ 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
/*
* SCSI GUID
* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
*/
#define HV_SCSI_GUID \
- .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
- 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
+ .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
+ 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
/*
* Shutdown GUID
* {0e0b6031-5213-4934-818b-38d90ced39db}
*/
#define HV_SHUTDOWN_GUID \
- .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
- 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
+ .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
+ 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
/*
* Time Synch GUID
* {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
*/
#define HV_TS_GUID \
- .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
- 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
+ .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
+ 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
/*
* Heartbeat GUID
* {57164f39-9115-4e78-ab55-382f3bd5422d}
*/
#define HV_HEART_BEAT_GUID \
- .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
- 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
+ .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
+ 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
/*
* KVP GUID
* {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
*/
#define HV_KVP_GUID \
- .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
- 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
+ .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
+ 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
/*
* Dynamic memory GUID
* {525074dc-8985-46e2-8057-a307dc18a502}
*/
#define HV_DM_GUID \
- .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
- 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
+ .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
+ 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
/*
* Mouse GUID
* {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
*/
#define HV_MOUSE_GUID \
- .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
- 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
+ .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
+ 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
/*
* Keyboard GUID
* {f912ad6d-2b17-48ea-bd65-f927a61c7684}
*/
#define HV_KBD_GUID \
- .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
- 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
+ .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
+ 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
/*
* VSS (Backup/Restore) GUID
*/
#define HV_VSS_GUID \
- .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
- 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
+ .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
+ 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
* Synthetic Video GUID
* {DA0A7802-E377-4aac-8E77-0558EB1073F8}
*/
#define HV_SYNTHVID_GUID \
- .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
- 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
+ .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
+ 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
/*
* Synthetic FC GUID
* {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
*/
#define HV_SYNTHFC_GUID \
- .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
- 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
+ .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
+ 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
/*
* Guest File Copy Service
@@ -1291,16 +1337,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
*/
#define HV_FCOPY_GUID \
- .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
- 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
+ .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
+ 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
/*
* NetworkDirect. This is the guest RDMA service.
* {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
*/
#define HV_ND_GUID \
- .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
- 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
+ .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
+ 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
/*
* PCI Express Pass Through
@@ -1308,8 +1354,8 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
*/
#define HV_PCIE_GUID \
- .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
- 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
+ .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
+ 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
* Linux doesn't support the 3 devices: the first two are for
@@ -1321,16 +1367,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
*/
#define HV_AVMA1_GUID \
- .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
- 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+ .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
#define HV_AVMA2_GUID \
- .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
- 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+ .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
#define HV_RDV_GUID \
- .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
- 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+ .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
/*
* Common header for Hyper-V ICs
@@ -1432,7 +1478,7 @@ struct ictimesync_ref_data {
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
- uuid_le data;
+ guid_t data;
struct vmbus_channel *channel;
void (*callback)(void *context);
};
@@ -1452,8 +1498,8 @@ void vmbus_setevent(struct vmbus_channel *channel);
extern __u32 vmbus_proto_version;
-int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
- const uuid_le *shv_host_servie_id);
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
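
A small sketch of how a VMBus driver consumes the guid_t-based macros above: the HV_*_GUID helpers expand to a .guid = GUID_INIT(...) designated initializer inside an id-table entry. The table name is illustrative.

#include <linux/hyperv.h>
#include <linux/mod_devicetable.h>

static const struct hv_vmbus_device_id example_id_table[] = {
        /* Expands to .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, ...) */
        { HV_NIC_GUID, },
        { },
};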
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
new file mode 100644
index 000000000000..63caccadc2db
--- /dev/null
+++ b/include/linux/interconnect-provider.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_PROVIDER_H
+#define __LINUX_INTERCONNECT_PROVIDER_H
+
+#include <linux/interconnect.h>
+
+#define icc_units_to_bps(bw) ((bw) * 1000ULL)
+
+struct icc_node;
+struct of_phandle_args;
+
+/**
+ * struct icc_onecell_data - driver data for onecell interconnect providers
+ *
+ * @num_nodes: number of nodes in this device
+ * @nodes: array of pointers to the nodes in this device
+ */
+struct icc_onecell_data {
+ unsigned int num_nodes;
+ struct icc_node *nodes[];
+};
+
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+ void *data);
+
+/**
+ * struct icc_provider - interconnect provider (controller) entity that might
+ * provide multiple interconnect controls
+ *
+ * @provider_list: list of the registered interconnect providers
+ * @nodes: internal list of the interconnect provider nodes
+ * @set: pointer to device specific set operation function
+ * @aggregate: pointer to device specific aggregate operation function
+ * @xlate: provider-specific callback for mapping nodes from phandle arguments
+ * @dev: the device this interconnect provider belongs to
+ * @users: count of active users
+ * @data: pointer to private data
+ */
+struct icc_provider {
+ struct list_head provider_list;
+ struct list_head nodes;
+ int (*set)(struct icc_node *src, struct icc_node *dst);
+ int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw,
+ u32 *agg_avg, u32 *agg_peak);
+ struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
+ struct device *dev;
+ int users;
+ void *data;
+};
+
+/**
+ * struct icc_node - entity that is part of the interconnect topology
+ *
+ * @id: platform specific node id
+ * @name: node name used in debugfs
+ * @links: a list of targets pointing to where we can go next when traversing
+ * @num_links: number of links to other interconnect nodes
+ * @provider: points to the interconnect provider of this node
+ * @node_list: the list entry in the parent provider's "nodes" list
+ * @search_list: list used when walking the nodes graph
+ * @reverse: pointer to previous node when walking the nodes graph
+ * @is_traversed: flag that is used when walking the nodes graph
+ * @req_list: a list of QoS constraint requests associated with this node
+ * @avg_bw: aggregated value of average bandwidth requests from all consumers
+ * @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @data: pointer to private data
+ */
+struct icc_node {
+ int id;
+ const char *name;
+ struct icc_node **links;
+ size_t num_links;
+
+ struct icc_provider *provider;
+ struct list_head node_list;
+ struct list_head search_list;
+ struct icc_node *reverse;
+ u8 is_traversed:1;
+ struct hlist_head req_list;
+ u32 avg_bw;
+ u32 peak_bw;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_node *icc_node_create(int id);
+void icc_node_destroy(int id);
+int icc_link_create(struct icc_node *node, const int dst_id);
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
+void icc_node_add(struct icc_node *node, struct icc_provider *provider);
+void icc_node_del(struct icc_node *node);
+int icc_provider_add(struct icc_provider *provider);
+int icc_provider_del(struct icc_provider *provider);
+
+#else
+
+static inline struct icc_node *icc_node_create(int id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+void icc_node_destroy(int id)
+{
+}
+
+static inline int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ return -ENOTSUPP;
+}
+
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+{
+ return -ENOTSUPP;
+}
+
+void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+}
+
+void icc_node_del(struct icc_node *node)
+{
+}
+
+static inline int icc_provider_add(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+static inline int icc_provider_del(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
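
A hedged provider-side sketch using the API declared above; the node ids (10, 11) are made up and error unwinding is omitted for brevity.

#include <linux/err.h>
#include <linux/interconnect-provider.h>

static int example_register_two_nodes(struct icc_provider *provider)
{
        struct icc_node *a, *b;
        int ret;

        ret = icc_provider_add(provider);
        if (ret)
                return ret;

        a = icc_node_create(10);
        if (IS_ERR(a))
                return PTR_ERR(a);
        icc_node_add(a, provider);

        b = icc_node_create(11);
        if (IS_ERR(b))
                return PTR_ERR(b);
        icc_node_add(b, provider);

        return icc_link_create(a, 11);  /* a -> b edge in the topology */
}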
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
new file mode 100644
index 000000000000..dc25864755ba
--- /dev/null
+++ b/include/linux/interconnect.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_H
+#define __LINUX_INTERCONNECT_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* macros for converting to icc units */
+#define Bps_to_icc(x) ((x) / 1000)
+#define kBps_to_icc(x) (x)
+#define MBps_to_icc(x) ((x) * 1000)
+#define GBps_to_icc(x) ((x) * 1000 * 1000)
+#define bps_to_icc(x) (1)
+#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0))
+#define Mbps_to_icc(x) ((x) * 1000 / 8)
+#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
+
+struct icc_path;
+struct device;
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_path *icc_get(struct device *dev, const int src_id,
+ const int dst_id);
+struct icc_path *of_icc_get(struct device *dev, const char *name);
+void icc_put(struct icc_path *path);
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
+
+#else
+
+static inline struct icc_path *icc_get(struct device *dev, const int src_id,
+ const int dst_id)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline void icc_put(struct icc_path *path)
+{
+}
+
+static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ return 0;
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_H */
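
A hedged consumer-side sketch; the path name "cpu-mem" is an assumption and must match an interconnects/interconnect-names entry in the consumer's DT node.

#include <linux/err.h>
#include <linux/interconnect.h>

static int example_request_bandwidth(struct device *dev)
{
        struct icc_path *path;
        int ret;

        path = of_icc_get(dev, "cpu-mem");
        if (IS_ERR(path))
                return PTR_ERR(path);

        /* Average 100 MB/s, peak 200 MB/s, expressed in icc (kB/s) units. */
        ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));

        icc_put(path);
        return ret;
}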
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index 862d786a904f..ae21b72cce85 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -55,4 +55,68 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed hi_lo_writeq_relaxed
#endif
+#ifndef ioread64_hi_lo
+#define ioread64_hi_lo ioread64_hi_lo
+static inline u64 ioread64_hi_lo(void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32(addr + sizeof(u32));
+ low = ioread32(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_hi_lo
+#define iowrite64_hi_lo iowrite64_hi_lo
+static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32(val >> 32, addr + sizeof(u32));
+ iowrite32(val, addr);
+}
+#endif
+
+#ifndef ioread64be_hi_lo
+#define ioread64be_hi_lo ioread64be_hi_lo
+static inline u64 ioread64be_hi_lo(void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32be(addr);
+ low = ioread32be(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_hi_lo
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32be(val >> 32, addr);
+ iowrite32be(val, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#define ioread64 ioread64_hi_lo
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#define iowrite64 iowrite64_hi_lo
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#define ioread64be ioread64be_hi_lo
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#define iowrite64be iowrite64be_hi_lo
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index d042e7bb5adb..faaa842dbdb9 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -55,4 +55,68 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed lo_hi_writeq_relaxed
#endif
+#ifndef ioread64_lo_hi
+#define ioread64_lo_hi ioread64_lo_hi
+static inline u64 ioread64_lo_hi(void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_lo_hi
+#define iowrite64_lo_hi iowrite64_lo_hi
+static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64be_lo_hi
+#define ioread64be_lo_hi ioread64be_lo_hi
+static inline u64 ioread64be_lo_hi(void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32be(addr + sizeof(u32));
+ high = ioread32be(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_lo_hi
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32be(val, addr + sizeof(u32));
+ iowrite32be(val >> 32, addr);
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#define ioread64 ioread64_lo_hi
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#define iowrite64 iowrite64_lo_hi
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#define ioread64be ioread64be_lo_hi
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#define iowrite64be iowrite64be_lo_hi
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
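
A short sketch of what the additions above buy a driver: including the lo-hi header provides ioread64() even on platforms without an atomic 64-bit accessor, implemented as two 32-bit reads (low word first). The register offset is an assumption.

#include <linux/io-64-nonatomic-lo-hi.h>

static u64 example_read_counter(void __iomem *regs)
{
        /*
         * ioread64() maps to ioread64_lo_hi() when the platform lacks a
         * native 64-bit ioread64 (see the #ifndef fallbacks above).
         */
        return ioread64(regs + 0x10);
}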
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 7fde40e17c8b..03b6ba2a63f8 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -55,6 +55,8 @@ struct mei_cl_device {
void *priv_data;
};
+#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
+
struct mei_cl_driver {
struct device_driver driver;
const char *name;
diff --git a/include/linux/nvram.h b/include/linux/nvram.h
index 28bfb9ab94ca..d29d9c93a927 100644
--- a/include/linux/nvram.h
+++ b/include/linux/nvram.h
@@ -2,13 +2,132 @@
#ifndef _LINUX_NVRAM_H
#define _LINUX_NVRAM_H
+#include <linux/errno.h>
#include <uapi/linux/nvram.h>
-/* __foo is foo without grabbing the rtc_lock - get it yourself */
-extern unsigned char __nvram_read_byte(int i);
-extern unsigned char nvram_read_byte(int i);
-extern void __nvram_write_byte(unsigned char c, int i);
-extern void nvram_write_byte(unsigned char c, int i);
-extern int __nvram_check_checksum(void);
-extern int nvram_check_checksum(void);
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif
+
+/**
+ * struct nvram_ops - NVRAM functionality made available to drivers
+ * @read: validate checksum (if any) then load a range of bytes from NVRAM
+ * @write: store a range of bytes to NVRAM then update checksum (if any)
+ * @read_byte: load a single byte from NVRAM
+ * @write_byte: store a single byte to NVRAM
+ * @get_size: return the fixed number of bytes in the NVRAM
+ *
+ * Architectures which provide an nvram ops struct need not implement all
+ * of these methods. If the NVRAM hardware can be accessed only one byte
+ * at a time then it may be sufficient to provide .read_byte and .write_byte.
+ * If the NVRAM has a checksum (and it is to be checked) the .read and
+ * .write methods can be used to implement that efficiently.
+ *
+ * Portable drivers may use the wrapper functions defined here.
+ * The nvram_read() and nvram_write() functions call the .read and .write
+ * methods when available and fall back on the .read_byte and .write_byte
+ * methods otherwise.
+ */
+
+struct nvram_ops {
+ ssize_t (*get_size)(void);
+ unsigned char (*read_byte)(int);
+ void (*write_byte)(unsigned char, int);
+ ssize_t (*read)(char *, size_t, loff_t *);
+ ssize_t (*write)(char *, size_t, loff_t *);
+#if defined(CONFIG_X86) || defined(CONFIG_M68K)
+ long (*initialize)(void);
+ long (*set_checksum)(void);
+#endif
+};
+
+extern const struct nvram_ops arch_nvram_ops;
+
+static inline ssize_t nvram_get_size(void)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_size)
+ return ppc_md.nvram_size();
+#else
+ if (arch_nvram_ops.get_size)
+ return arch_nvram_ops.get_size();
+#endif
+ return -ENODEV;
+}
+
+static inline unsigned char nvram_read_byte(int addr)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_read_val)
+ return ppc_md.nvram_read_val(addr);
+#else
+ if (arch_nvram_ops.read_byte)
+ return arch_nvram_ops.read_byte(addr);
+#endif
+ return 0xFF;
+}
+
+static inline void nvram_write_byte(unsigned char val, int addr)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_write_val)
+ ppc_md.nvram_write_val(addr, val);
+#else
+ if (arch_nvram_ops.write_byte)
+ arch_nvram_ops.write_byte(val, addr);
+#endif
+}
+
+static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos)
+{
+ ssize_t nvram_size = nvram_get_size();
+ loff_t i;
+ char *p = buf;
+
+ if (nvram_size < 0)
+ return nvram_size;
+ for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
+ *p = nvram_read_byte(i);
+ *ppos = i;
+ return p - buf;
+}
+
+static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos)
+{
+ ssize_t nvram_size = nvram_get_size();
+ loff_t i;
+ char *p = buf;
+
+ if (nvram_size < 0)
+ return nvram_size;
+ for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
+ nvram_write_byte(*p, i);
+ *ppos = i;
+ return p - buf;
+}
+
+static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_read)
+ return ppc_md.nvram_read(buf, count, ppos);
+#else
+ if (arch_nvram_ops.read)
+ return arch_nvram_ops.read(buf, count, ppos);
+#endif
+ return nvram_read_bytes(buf, count, ppos);
+}
+
+static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_write)
+ return ppc_md.nvram_write(buf, count, ppos);
+#else
+ if (arch_nvram_ops.write)
+ return arch_nvram_ops.write(buf, count, ppos);
+#endif
+ return nvram_write_bytes(buf, count, ppos);
+}
+
#endif /* _LINUX_NVRAM_H */
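
A minimal sketch of a portable caller using the wrappers above; the buffer handling is illustrative.

#include <linux/types.h>
#include <linux/nvram.h>

static ssize_t example_dump_nvram(char *buf, size_t len)
{
        loff_t pos = 0;

        /*
         * Uses the arch .read method when one exists and otherwise falls
         * back to byte-at-a-time access via nvram_read_bytes().
         */
        return nvram_read(buf, len, &pos);
}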
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 397607a0c0eb..f41f1d041e2c 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,6 +460,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
+#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
@@ -468,6 +469,18 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
extern void parport_daisy_deselect_all (struct parport *port);
extern int parport_daisy_select (struct parport *port, int daisy, int mode);
+#ifdef CONFIG_PARPORT_1284
+extern int daisy_drv_init(void);
+extern void daisy_drv_exit(void);
+#else
+static inline int daisy_drv_init(void)
+{
+ return 0;
+}
+
+static inline void daisy_drv_exit(void) {}
+#endif
+
/* Lowlevel drivers _can_ call this support function to handle irqs. */
static inline void parport_generic_irq(struct parport *port)
{
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index b724ef7005de..eaa1e762bf06 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -45,6 +45,7 @@
#define VMCI_CAPS_GUESTCALL 0x2
#define VMCI_CAPS_DATAGRAM 0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8
+#define VMCI_CAPS_PPN64 0x10
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM 0x1
@@ -569,8 +570,10 @@ struct vmci_resource_query_msg {
*/
struct vmci_notify_bm_set_msg {
struct vmci_datagram hdr;
- u32 bitmap_ppn;
- u32 _pad;
+ union {
+ u32 bitmap_ppn32;
+ u64 bitmap_ppn64;
+ };
};
/*
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
index 2ec31b358950..d4804c72d959 100644
--- a/include/sound/hda_component.h
+++ b/include/sound/hda_component.h
@@ -20,7 +20,7 @@ int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_acomp_init(struct hdac_bus *bus,
const struct drm_audio_component_audio_ops *aops,
- int (*match_master)(struct device *, void *),
+ int (*match_master)(struct device *, int, void *),
size_t extra_size);
int snd_hdac_acomp_exit(struct hdac_bus *bus);
int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
@@ -47,7 +47,8 @@ static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t ni
}
static inline int snd_hdac_acomp_init(struct hdac_bus *bus,
const struct drm_audio_component_audio_ops *aops,
- int (*match_master)(struct device *, void *),
+ int (*match_master)(struct device *,
+ int, void *),
size_t extra_size)
{
return -ENODEV;
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index b9ba520f7e4b..2832134e5397 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -41,6 +41,14 @@ enum {
enum {
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+
+ /**
+ * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+ *
+ * Only when set, causes senders to include their security
+ * context
+ */
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};
#ifdef BINDER_IPC_32BIT
@@ -218,6 +226,7 @@ struct binder_node_info_for_ref {
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
+#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
/*
* NOTE: Two special error codes you should check for when calling
@@ -276,6 +285,11 @@ struct binder_transaction_data {
} data;
};
+struct binder_transaction_data_secctx {
+ struct binder_transaction_data transaction_data;
+ binder_uintptr_t secctx;
+};
+
struct binder_transaction_data_sg {
struct binder_transaction_data transaction_data;
binder_size_t buffers_size;
@@ -311,6 +325,11 @@ enum binder_driver_return_protocol {
BR_OK = _IO('r', 1),
/* No parameters! */
+ BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+ struct binder_transaction_data_secctx),
+ /*
+ * binder_transaction_data_secctx: the received command.
+ */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
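
A hedged userspace sketch (not from the patch) of the new context-manager ioctl: a process registers itself with BINDER_SET_CONTEXT_MGR_EXT and opts in to receiving sender security contexts via the new flag. The function name is illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int example_become_ctx_mgr(int binder_fd)
{
        struct flat_binder_object obj;

        memset(&obj, 0, sizeof(obj));
        obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;

        return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);
}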
diff --git a/include/uapi/linux/pmu.h b/include/uapi/linux/pmu.h
index 97256f90e6df..f2fc1bd80017 100644
--- a/include/uapi/linux/pmu.h
+++ b/include/uapi/linux/pmu.h
@@ -19,7 +19,9 @@
#define PMU_POWER_CTRL 0x11 /* control power of some devices */
#define PMU_ADB_CMD 0x20 /* send ADB packet */
#define PMU_ADB_POLL_OFF 0x21 /* disable ADB auto-poll */
+#define PMU_WRITE_XPRAM 0x32 /* write eXtended Parameter RAM */
#define PMU_WRITE_NVRAM 0x33 /* write non-volatile RAM */
+#define PMU_READ_XPRAM 0x3a /* read eXtended Parameter RAM */
#define PMU_READ_NVRAM 0x3b /* read non-volatile RAM */
#define PMU_SET_RTC 0x30 /* set real-time clock */
#define PMU_READ_RTC 0x38 /* read real-time clock */
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
new file mode 100644
index 000000000000..6d701af9fc42
--- /dev/null
+++ b/include/uapi/misc/fastrpc.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_FASTRPC_H__
+#define __QCOM_FASTRPC_H__
+
+#include <linux/types.h>
+
+#define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
+#define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32)
+#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
+#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
+#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
+
+struct fastrpc_invoke_args {
+ __u64 ptr;
+ __u64 length;
+ __s32 fd;
+ __u32 reserved;
+};
+
+struct fastrpc_invoke {
+ __u32 handle;
+ __u32 sc;
+ __u64 args;
+};
+
+struct fastrpc_init_create {
+ __u32 filelen; /* elf file length */
+ __s32 filefd; /* fd for the file */
+ __u32 attrs;
+ __u32 siglen;
+ __u64 file; /* pointer to elf file */
+};
+
+struct fastrpc_alloc_dma_buf {
+ __s32 fd; /* fd */
+ __u32 flags; /* flags to map with */
+ __u64 size; /* size */
+};
+
+#endif /* __QCOM_FASTRPC_H__ */
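
A hedged userspace sketch of the uapi above: attach to the already-running remote subsystem, then issue a call that carries no argument buffers. The handle and packed scalars value are caller-supplied and illustrative.

#include <sys/ioctl.h>
#include <linux/types.h>
#include <misc/fastrpc.h>

static int example_remote_call(int fastrpc_fd, __u32 handle, __u32 sc)
{
        struct fastrpc_invoke inv = {
                .handle = handle,       /* remote interface handle */
                .sc = sc,               /* packed method index + argument counts */
                .args = 0,              /* no fastrpc_invoke_args for this call */
        };

        if (ioctl(fastrpc_fd, FASTRPC_IOCTL_INIT_ATTACH))
                return -1;

        return ioctl(fastrpc_fd, FASTRPC_IOCTL_INVOKE, &inv);
}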
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
new file mode 100644
index 000000000000..7fd6f633534c
--- /dev/null
+++ b/include/uapi/misc/habanalabs.h
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HABANALABS_H_
+#define HABANALABS_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Defines that are asic-specific but constitute the ABI between the kernel
+ * driver and userspace
+ */
+#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
+
+/*
+ * Queue Numbering
+ *
+ * The external queues (DMA channels + CPU) MUST be before the internal queues
+ * and each group (DMA channels + CPU and internal) must be contiguous inside
+ * itself but there can be a gap between the two groups (although not
+ * recommended)
+ */
+
+enum goya_queue_id {
+ GOYA_QUEUE_ID_DMA_0 = 0,
+ GOYA_QUEUE_ID_DMA_1,
+ GOYA_QUEUE_ID_DMA_2,
+ GOYA_QUEUE_ID_DMA_3,
+ GOYA_QUEUE_ID_DMA_4,
+ GOYA_QUEUE_ID_CPU_PQ,
+ GOYA_QUEUE_ID_MME,
+ GOYA_QUEUE_ID_TPC0,
+ GOYA_QUEUE_ID_TPC1,
+ GOYA_QUEUE_ID_TPC2,
+ GOYA_QUEUE_ID_TPC3,
+ GOYA_QUEUE_ID_TPC4,
+ GOYA_QUEUE_ID_TPC5,
+ GOYA_QUEUE_ID_TPC6,
+ GOYA_QUEUE_ID_TPC7,
+ GOYA_QUEUE_ID_SIZE
+};
+
+/* Opcode for management ioctl */
+#define HL_INFO_HW_IP_INFO 0
+#define HL_INFO_HW_EVENTS 1
+#define HL_INFO_DRAM_USAGE 2
+#define HL_INFO_HW_IDLE 3
+
+#define HL_INFO_VERSION_MAX_LEN 128
+
+struct hl_info_hw_ip_info {
+ __u64 sram_base_address;
+ __u64 dram_base_address;
+ __u64 dram_size;
+ __u32 sram_size;
+ __u32 num_of_events;
+ __u32 device_id; /* PCI Device ID */
+ __u32 reserved[3];
+ __u32 armcp_cpld_version;
+ __u32 psoc_pci_pll_nr;
+ __u32 psoc_pci_pll_nf;
+ __u32 psoc_pci_pll_od;
+ __u32 psoc_pci_pll_div_factor;
+ __u8 tpc_enabled_mask;
+ __u8 dram_enabled;
+ __u8 pad[2];
+ __u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
+};
+
+struct hl_info_dram_usage {
+ __u64 dram_free_mem;
+ __u64 ctx_dram_mem;
+};
+
+struct hl_info_hw_idle {
+ __u32 is_idle;
+ __u32 pad;
+};
+
+struct hl_info_args {
+ /* Location of relevant struct in userspace */
+ __u64 return_pointer;
+ /*
+ * The size of the return value. Just like "size" in "snprintf",
+ * it limits how many bytes the kernel can write
+ *
+ * For hw_events array, the size should be
+ * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
+ */
+ __u32 return_size;
+
+ /* HL_INFO_* */
+ __u32 op;
+
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+/* Opcode to create a new command buffer */
+#define HL_CB_OP_CREATE 0
+/* Opcode to destroy previously created command buffer */
+#define HL_CB_OP_DESTROY 1
+
+struct hl_cb_in {
+ /* Handle of CB or 0 if we want to create one */
+ __u64 cb_handle;
+ /* HL_CB_OP_* */
+ __u32 op;
+ /* Size of CB. Maximum size is 2MB. The minimum size that will be
+ * allocated, regardless of this parameter's value, is PAGE_SIZE
+ */
+ __u32 cb_size;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct hl_cb_out {
+ /* Handle of CB */
+ __u64 cb_handle;
+};
+
+union hl_cb_args {
+ struct hl_cb_in in;
+ struct hl_cb_out out;
+};
+
+/*
+ * This structure size must always be fixed to 64-bytes for backward
+ * compatibility
+ */
+struct hl_cs_chunk {
+ /*
+ * For external queue, this represents a Handle of CB on the Host
+ * For internal queue, this represents an SRAM or DRAM address of the
+ * internal CB
+ */
+ __u64 cb_handle;
+ /* Index of queue to put the CB on */
+ __u32 queue_index;
+ /*
+ * Size of command buffer with valid packets
+	 * Can be smaller than the actual CB size
+ */
+ __u32 cb_size;
+ /* HL_CS_CHUNK_FLAGS_* */
+ __u32 cs_chunk_flags;
+ /* Align structure to 64 bytes */
+ __u32 pad[11];
+};
+
+#define HL_CS_FLAGS_FORCE_RESTORE 0x1
+
+#define HL_CS_STATUS_SUCCESS 0
+
+struct hl_cs_in {
+ /* this holds address of array of hl_cs_chunk for restore phase */
+ __u64 chunks_restore;
+ /* this holds address of array of hl_cs_chunk for execution phase */
+ __u64 chunks_execute;
+ /* this holds address of array of hl_cs_chunk for store phase -
+ * Currently not in use
+ */
+ __u64 chunks_store;
+ /* Number of chunks in restore phase array */
+ __u32 num_chunks_restore;
+ /* Number of chunks in execution array */
+ __u32 num_chunks_execute;
+ /* Number of chunks in restore phase array - Currently not in use */
+ __u32 num_chunks_store;
+ /* HL_CS_FLAGS_* */
+ __u32 cs_flags;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+};
+
+struct hl_cs_out {
+ /* this holds the sequence number of the CS to pass to wait ioctl */
+ __u64 seq;
+ /* HL_CS_STATUS_* */
+ __u32 status;
+ __u32 pad;
+};
+
+union hl_cs_args {
+ struct hl_cs_in in;
+ struct hl_cs_out out;
+};
+
+struct hl_wait_cs_in {
+ /* Command submission sequence number */
+ __u64 seq;
+ /* Absolute timeout to wait in microseconds */
+ __u64 timeout_us;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+#define HL_WAIT_CS_STATUS_COMPLETED 0
+#define HL_WAIT_CS_STATUS_BUSY 1
+#define HL_WAIT_CS_STATUS_TIMEDOUT 2
+#define HL_WAIT_CS_STATUS_ABORTED 3
+#define HL_WAIT_CS_STATUS_INTERRUPTED 4
+
+struct hl_wait_cs_out {
+ /* HL_WAIT_CS_STATUS_* */
+ __u32 status;
+ __u32 pad;
+};
+
+union hl_wait_cs_args {
+ struct hl_wait_cs_in in;
+ struct hl_wait_cs_out out;
+};
+
+/* Opcode to alloc device memory */
+#define HL_MEM_OP_ALLOC 0
+/* Opcode to free previously allocated device memory */
+#define HL_MEM_OP_FREE 1
+/* Opcode to map host memory */
+#define HL_MEM_OP_MAP 2
+/* Opcode to unmap previously mapped host memory */
+#define HL_MEM_OP_UNMAP 3
+
+/* Memory flags */
+#define HL_MEM_CONTIGUOUS 0x1
+#define HL_MEM_SHARED 0x2
+#define HL_MEM_USERPTR 0x4
+
+struct hl_mem_in {
+ union {
+ /* HL_MEM_OP_ALLOC- allocate device memory */
+ struct {
+ /* Size to alloc */
+ __u64 mem_size;
+ } alloc;
+
+ /* HL_MEM_OP_FREE - free device memory */
+ struct {
+ /* Handle returned from HL_MEM_OP_ALLOC */
+ __u64 handle;
+ } free;
+
+ /* HL_MEM_OP_MAP - map device memory */
+ struct {
+ /*
+ * Requested virtual address of mapped memory.
+ * KMD will try to map the requested region to this
+ * hint address, as long as the address is valid and
+ * not already mapped. The user should check the
+ * returned address of the IOCTL to make sure he got
+ * the hint address. Passing 0 here means that KMD
+ * will choose the address itself.
+ */
+ __u64 hint_addr;
+ /* Handle returned from HL_MEM_OP_ALLOC */
+ __u64 handle;
+ } map_device;
+
+ /* HL_MEM_OP_MAP - map host memory */
+ struct {
+ /* Address of allocated host memory */
+ __u64 host_virt_addr;
+ /*
+ * Requested virtual address of mapped memory.
+ * KMD will try to map the requested region to this
+ * hint address, as long as the address is valid and
+ * not already mapped. The user should check the
+ * returned address of the IOCTL to make sure he got
+ * the hint address. Passing 0 here means that KMD
+ * will choose the address itself.
+ */
+ __u64 hint_addr;
+ /* Size of allocated host memory */
+ __u64 mem_size;
+ } map_host;
+
+ /* HL_MEM_OP_UNMAP - unmap host memory */
+ struct {
+ /* Virtual address returned from HL_MEM_OP_MAP */
+ __u64 device_virt_addr;
+ } unmap;
+ };
+
+ /* HL_MEM_OP_* */
+ __u32 op;
+ /* HL_MEM_* flags */
+ __u32 flags;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct hl_mem_out {
+ union {
+ /*
+ * Used for HL_MEM_OP_MAP as the virtual address that was
+ * assigned in the device VA space.
+ * A value of 0 means the requested operation failed.
+ */
+ __u64 device_virt_addr;
+
+ /*
+ * Used for HL_MEM_OP_ALLOC. This is the assigned
+ * handle for the allocated memory
+ */
+ __u64 handle;
+ };
+};
+
+union hl_mem_args {
+ struct hl_mem_in in;
+ struct hl_mem_out out;
+};
+
+/*
+ * Various information operations such as:
+ * - H/W IP information
+ * - Current dram usage
+ *
+ * The user calls this IOCTL with an opcode that describes the required
+ * information. The user should supply a pointer to a user-allocated memory
+ * chunk, which will be filled by the driver with the requested information.
+ *
+ * The user supplies the maximum amount of size to copy into the user's memory,
+ * in order to prevent data corruption in case of differences between the
+ * definitions of structures in kernel and userspace, e.g. in case of old
+ * userspace and new kernel driver
+ */
+#define HL_IOCTL_INFO \
+ _IOWR('H', 0x01, struct hl_info_args)
+
+/*
+ * Command Buffer
+ * - Request a Command Buffer
+ * - Destroy a Command Buffer
+ *
+ * The command buffers are memory blocks that reside in DMA-able address
+ * space and are physically contiguous so they can be accessed by the device
+ * directly. They are allocated using the coherent DMA API.
+ *
+ * When creating a new CB, the IOCTL returns a handle of it, and the user-space
+ * process needs to use that handle to mmap the buffer so it can access it.
+ *
+ */
+#define HL_IOCTL_CB \
+ _IOWR('H', 0x02, union hl_cb_args)
+
+/*
+ * Command Submission
+ *
+ * To submit work to the device, the user needs to call this IOCTL with a set
+ * of JOBS. That set of JOBS constitutes a CS object.
+ * Each JOB will be enqueued on a specific queue, according to the user's input.
+ * There can be more than one JOB per queue.
+ *
+ * There are two types of queues - external and internal. External queues
+ * are DMA queues which transfer data from/to the Host. All other queues are
+ * internal. The driver will get completion notifications from the device only
+ * on JOBS which are enqueued in the external queues.
+ *
+ * For jobs on external queues, the user needs to create command buffers
+ * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
+ * internal queues, the user needs to prepare a "command buffer" with packets
+ * on either the SRAM or DRAM, and give the device address of that buffer to
+ * the CS ioctl.
+ *
+ * This IOCTL is asynchronous in regard to the actual execution of the CS. This
+ * means it returns immediately after ALL the JOBS were enqueued on their
+ * relevant queues. Therefore, the user mustn't assume the CS has been completed
+ * or has even started to execute.
+ *
+ * Upon successful enqueue, the IOCTL returns an opaque handle which the user
+ * can use with the "Wait for CS" IOCTL to check whether the external JOBS of
+ * the handle's CS have been completed. Note that if the CS has internal JOBS
+ * which can execute AFTER the external JOBS have finished, the driver might
+ * report that the CS has finished executing BEFORE the internal JOBS have
+ * actually finished executing.
+ *
+ * The CS IOCTL will receive three sets of JOBS. One set is for the "restore"
+ * phase, a second set is for the "execution" phase and a third set is for the
+ * "store" phase. The JOBS of the "restore" phase are enqueued only after a
+ * context switch (or if it is the first CS for this context). The user can
+ * also order the driver to run the "restore" phase explicitly.
+ *
+ */
+#define HL_IOCTL_CS \
+ _IOWR('H', 0x03, union hl_cs_args)
+
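/*
 * Illustration only (not part of the patch): a minimal user-space sketch of
 * submitting a CS and keeping the opaque handle for HL_IOCTL_WAIT_CS. The
 * caller is expected to have filled the JOB descriptors in *args beforehand;
 * the layout of union hl_cs_args is not shown in this hunk, so the
 * args->out.seq field used below is an assumption.
 */
#include <sys/ioctl.h>
#include "habanalabs.h" /* the uapi header added by this patch */

static int submit_cs(int fd, union hl_cs_args *args, __u64 *handle)
{
	if (ioctl(fd, HL_IOCTL_CS, args) == -1)
		return -1; /* enqueue failed */

	/*
	 * Success only means all JOBS were enqueued; execution may not have
	 * started yet, so completion must be checked via HL_IOCTL_WAIT_CS
	 * using the returned handle.
	 */
	*handle = args->out.seq; /* assumed field name for the opaque handle */
	return 0;
}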
+/*
+ * Wait for Command Submission
+ *
+ * The user can call this IOCTL with a handle it received from the CS IOCTL
+ * to wait until the handle's CS has finished executing. The user will wait
+ * inside the kernel until the CS has finished or until the user-requested
+ * timeout has expired.
+ *
+ * The return value of the IOCTL is a standard Linux error code. The possible
+ * values are:
+ *
+ * EINTR - Kernel waiting has been interrupted, e.g. due to an OS signal
+ * that the user process received
+ * ETIMEDOUT - The CS has caused a timeout on the device
+ * EIO - The CS was aborted (usually because the device was reset)
+ * ENODEV - The device wants to do a hard reset (so the user needs to close the FD)
+ *
+ * The driver also returns a custom status value inside the IOCTL's output
+ * arguments, which can be one of:
+ *
+ * HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
+ * HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
+ * HL_WAIT_CS_STATUS_TIMEDOUT - The CS has caused a timeout on the device
+ * (ETIMEDOUT)
+ * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
+ * device was reset (EIO)
+ * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
+ *
+ */
+
+#define HL_IOCTL_WAIT_CS \
+ _IOWR('H', 0x04, union hl_wait_cs_args)
+
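/*
 * Illustration only (not part of the patch): waiting on the handle returned
 * by HL_IOCTL_CS, with the errno handling documented above. union
 * hl_wait_cs_args is not shown in this hunk, so the in.seq, in.timeout_us and
 * out.status field names are assumptions.
 */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include "habanalabs.h"

static int wait_for_cs(int fd, __u64 handle, __u64 timeout_us)
{
	union hl_wait_cs_args args;
	int rc;

	memset(&args, 0, sizeof(args));
	args.in.seq = handle;            /* assumed field name */
	args.in.timeout_us = timeout_us; /* assumed field name */

	do {
		rc = ioctl(fd, HL_IOCTL_WAIT_CS, &args);
	} while (rc == -1 && errno == EINTR); /* interrupted by a signal, retry */

	if (rc == -1)
		return -1; /* ETIMEDOUT, EIO or ENODEV, as listed above */

	/* assumed field name; 0 is returned for COMPLETED and BUSY alike */
	return args.out.status == HL_WAIT_CS_STATUS_COMPLETED ? 0 : 1;
}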
+/*
+ * Memory
+ * - Map host memory to device MMU
+ * - Unmap host memory from device MMU
+ *
+ * This IOCTL allows the user to map host memory to the device MMU and unmap it.
+ *
+ * For host memory, the IOCTL doesn't allocate memory. The user is supposed
+ * to allocate the memory in user-space (malloc/new). The driver pins the
+ * physical pages (up to the limit allowed by the OS), assigns a virtual
+ * address in the device VA space and initializes the device MMU.
+ *
+ * There is an option for the user to specify the requested virtual address.
+ *
+ */
+#define HL_IOCTL_MEMORY \
+ _IOWR('H', 0x05, union hl_mem_args)
+
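/*
 * Illustration only (not part of the patch): mapping a user-allocated buffer
 * into the device VA space and unmapping it again. The op, hint_addr,
 * mem_size, unmap.device_virt_addr and out.device_virt_addr fields are the
 * ones shown above; the host_virt_addr field and the HL_MEM_USERPTR flag are
 * assumptions, as that part of the header is outside this hunk.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "habanalabs.h"

static __u64 map_host_buffer(int fd, void *buf, __u64 size)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_MAP;
	args.in.flags = HL_MEM_USERPTR; /* assumed flag selecting host memory */
	args.in.map_host.host_virt_addr = (__u64)(uintptr_t)buf; /* assumed field */
	args.in.map_host.hint_addr = 0; /* let KMD choose the device VA */
	args.in.map_host.mem_size = size;

	if (ioctl(fd, HL_IOCTL_MEMORY, &args) == -1)
		return 0;

	return args.out.device_virt_addr; /* 0 means the operation failed */
}

static int unmap_host_buffer(int fd, __u64 device_virt_addr)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_UNMAP;
	args.in.unmap.device_virt_addr = device_virt_addr;

	return ioctl(fd, HL_IOCTL_MEMORY, &args);
}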
+#define HL_COMMAND_START 0x01
+#define HL_COMMAND_END 0x06
+
+#endif /* HABANALABS_H_ */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e6a7b01932e6..b19cc9c36475 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1682,7 +1682,6 @@ if RUNTIME_TESTING_MENU
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_FS
- depends on BLOCK
help
This module enables testing of the different dumping mechanisms by
inducing system failures at predefined crash points.
diff --git a/lib/iomap.c b/lib/iomap.c
index 541d926da95e..e909ab71e995 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -65,8 +65,9 @@ static void bad_io_access(unsigned long port, const char *access)
#endif
#ifndef mmio_read16be
-#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
-#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
+#define mmio_read16be(addr) swab16(readw(addr))
+#define mmio_read32be(addr) swab32(readl(addr))
+#define mmio_read64be(addr) swab64(readq(addr))
#endif
unsigned int ioread8(void __iomem *addr)
@@ -100,14 +101,89 @@ EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
+#ifdef readq
+static u64 pio_read64_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = inl(port);
+ hi = inl(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = inl(port + sizeof(u32));
+ lo = inl(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = pio_read32be(port + sizeof(u32));
+ hi = pio_read32be(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = pio_read32be(port);
+ lo = pio_read32be(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+u64 ioread64_lo_hi(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64_hi_lo(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_lo_hi(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_lo_hi(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_hi_lo(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_hi_lo(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+EXPORT_SYMBOL(ioread64_lo_hi);
+EXPORT_SYMBOL(ioread64_hi_lo);
+EXPORT_SYMBOL(ioread64be_lo_hi);
+EXPORT_SYMBOL(ioread64be_hi_lo);
+
+#endif /* readq */
+
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif
#ifndef mmio_write16be
-#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
-#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
+#define mmio_write16be(val,port) writew(swab16(val),port)
+#define mmio_write32be(val,port) writel(swab32(val),port)
+#define mmio_write64be(val,port) writeq(swab64(val),port)
#endif
void iowrite8(u8 val, void __iomem *addr)
@@ -136,6 +212,62 @@ EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
+#ifdef writeq
+static void pio_write64_lo_hi(u64 val, unsigned long port)
+{
+ outl(val, port);
+ outl(val >> 32, port + sizeof(u32));
+}
+
+static void pio_write64_hi_lo(u64 val, unsigned long port)
+{
+ outl(val >> 32, port + sizeof(u32));
+ outl(val, port);
+}
+
+static void pio_write64be_lo_hi(u64 val, unsigned long port)
+{
+ pio_write32be(val, port + sizeof(u32));
+ pio_write32be(val >> 32, port);
+}
+
+static void pio_write64be_hi_lo(u64 val, unsigned long port)
+{
+ pio_write32be(val >> 32, port);
+ pio_write32be(val, port + sizeof(u32));
+}
+
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64_lo_hi(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64_hi_lo(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64be_lo_hi(val, port),
+ mmio_write64be(val, addr));
+}
+
+void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64be_hi_lo(val, port),
+ mmio_write64be(val, addr));
+}
+
+EXPORT_SYMBOL(iowrite64_lo_hi);
+EXPORT_SYMBOL(iowrite64_hi_lo);
+EXPORT_SYMBOL(iowrite64be_lo_hi);
+EXPORT_SYMBOL(iowrite64be_hi_lo);
+
+#endif /* writeq */
+
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__raw" accesses, since we don't want to
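As a usage sketch (not part of the patch), a driver could consume the new 64-bit helpers as shown below. The register offsets and the dev_ prefix are made up for illustration; the helpers are only built when the architecture provides readq/writeq, as guarded above, and the declarations are assumed to be reachable via <linux/io.h> on architectures that use the generic iomap code.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register layout of a device with 64-bit registers. */
#define DEV_COUNTER_OFFSET	0x10
#define DEV_DOORBELL_OFFSET	0x18

static u64 dev_read_counter(void __iomem *base)
{
	/*
	 * On a PIO mapping this becomes two inl() accesses, low word first;
	 * on MMIO it is a single readq() where the architecture has one.
	 */
	return ioread64_lo_hi(base + DEV_COUNTER_OFFSET);
}

static void dev_ring_doorbell(void __iomem *base, u64 val)
{
	/* Low 32 bits are written before the high 32 bits. */
	iowrite64_lo_hi(val, base + DEV_DOORBELL_OFFSET);
}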
diff --git a/scripts/ver_linux b/scripts/ver_linux
index a6c728db05ce..810e608baa24 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -13,6 +13,8 @@ BEGIN {
system("uname -a")
printf("\n")
+ vernum = "[0-9]+([.]?[0-9]+)+"
+
printversion("GNU C", version("gcc -dumpversion"))
printversion("GNU Make", version("make --version"))
printversion("Binutils", version("ld -v"))
@@ -34,7 +36,7 @@ BEGIN {
while (getline <"/proc/self/maps" > 0) {
if (/libc.*\.so$/) {
n = split($0, procmaps, "/")
- if (match(procmaps[n], /[0-9]+([.]?[0-9]+)+/)) {
+ if (match(procmaps[n], vernum)) {
ver = substr(procmaps[n], RSTART, RLENGTH)
printversion("Linux C Library", ver)
break
@@ -70,7 +72,7 @@ BEGIN {
function version(cmd, ver) {
cmd = cmd " 2>&1"
while (cmd | getline > 0) {
- if (match($0, /[0-9]+([.]?[0-9]+)+/)) {
+ if (match($0, vernum)) {
ver = substr($0, RSTART, RLENGTH)
break
}
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
index a6d37b9d6413..5c95933e739a 100644
--- a/sound/hda/hdac_component.c
+++ b/sound/hda/hdac_component.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_acomp_register_notifier);
*/
int snd_hdac_acomp_init(struct hdac_bus *bus,
const struct drm_audio_component_audio_ops *aops,
- int (*match_master)(struct device *, void *),
+ int (*match_master)(struct device *, int, void *),
size_t extra_size)
{
struct component_match *match = NULL;
@@ -288,7 +288,7 @@ int snd_hdac_acomp_init(struct hdac_bus *bus,
bus->audio_component = acomp;
devres_add(dev, acomp);
- component_match_add(dev, &match, match_master, bus);
+ component_match_add_typed(dev, &match, match_master, bus);
ret = component_master_add_with_match(dev, &hdac_component_master_ops,
match);
if (ret < 0)
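For reference, a hedged sketch of the typed-match pattern that component_match_add_typed() enables; the "example-gpu" driver name, the EXAMPLE_SUBCOMPONENT constant and the ops pointer are placeholders, while the real in-tree callback is the i915 hunk that follows.

#include <linux/component.h>
#include <linux/device.h>
#include <linux/string.h>

#define EXAMPLE_SUBCOMPONENT	1 /* placeholder; i915 passes I915_COMPONENT_AUDIO */

static int example_match(struct device *dev, int subcomponent, void *data)
{
	/* Match on the bound driver *and* the advertised subcomponent. */
	return dev->driver && !strcmp(dev->driver->name, "example-gpu") &&
	       subcomponent == EXAMPLE_SUBCOMPONENT;
}

static int example_bind_master(struct device *dev, void *data,
			       const struct component_master_ops *ops)
{
	struct component_match *match = NULL;

	component_match_add_typed(dev, &match, example_match, data);
	return component_master_add_with_match(dev, ops, match);
}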
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 27eb0270a711..575198bd3cd0 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -82,9 +82,11 @@ void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
}
EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
-static int i915_component_master_match(struct device *dev, void *data)
+static int i915_component_master_match(struct device *dev, int subcomponent,
+ void *data)
{
- return !strcmp(dev->driver->name, "i915");
+ return !strcmp(dev->driver->name, "i915") &&
+ subcomponent == I915_COMPONENT_AUDIO;
}
/* check whether intel graphics is present */