From 1a4a66ddc7b290ea2fd492c9c922ee7205d44724 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 30 Jan 2017 10:46:35 -0500 Subject: drm/msm: remove qcom,gpu-pwrlevels bindings The plan is to use the OPP bindings. For now, remove the documentation for qcom,gpu-pwrlevels, and make the driver fall back to a safe low clock if the node is not present. Note that no upstream dtb use this node. For now we keep compatibility with this node to avoid breaking compatibility with downstream android dt files. Signed-off-by: Rob Clark Reviewed-by: Eric Anholt Acked-by: Rob Herring --- drivers/gpu/drm/msm/adreno/adreno_device.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 893eb2b2531b..8d54cb764f77 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -224,8 +224,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) } if (!config.fast_rate) { - dev_err(dev, "could not find clk rates\n"); - return -ENXIO; + dev_warn(dev, "could not find clk rates\n"); + /* This is a safe low speed for all devices: */ + config.fast_rate = 200000000; + config.slow_rate = 27000000; } for (i = 0; i < ARRAY_SIZE(quirks); i++) -- cgit v1.2.3-55-g7522 From 1db7afa4914642146637f891c9d369948bb026c7 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 30 Jan 2017 11:02:27 -0500 Subject: drm/msm: drop qcom,chipid The original way we determined the gpu version was based on downstream bindings from android kernel. A cleaner way is to get the version from the compatible string. Note that no upstream dtb uses these bindings. But the code still supports falling back to the legacy bindings (with a warning), so that we are still compatible with the gpu dt node from android device kernels. Signed-off-by: Rob Clark Reviewed-by: Eric Anholt Acked-by: Rob Herring --- .../devicetree/bindings/display/msm/gpu.txt | 11 +++--- drivers/gpu/drm/msm/adreno/adreno_device.c | 40 +++++++++++++++++++++- drivers/gpu/drm/msm/msm_drv.c | 1 + 3 files changed, 46 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt index 747b984c7210..7ac3052ca7b5 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.txt +++ b/Documentation/devicetree/bindings/display/msm/gpu.txt @@ -1,7 +1,11 @@ Qualcomm adreno/snapdragon GPU Required properties: -- compatible: "qcom,adreno-3xx" +- compatible: "qcom,adreno-XYZ.W", "qcom,adreno" + for example: "qcom,adreno-306.0", "qcom,adreno" + Note that you need to list the less specific "qcom,adreno" (since this + is what the device is matched on), in addition to the more specific + with the chip-id. - reg: Physical base address and length of the controller's registers. - interrupts: The interrupt signal from the gpu. - clocks: device clocks @@ -10,8 +14,6 @@ Required properties: * "core_clk" * "iface_clk" * "mem_iface_clk" -- qcom,chipid: gpu chip-id. Note this may become optional for future - devices if we can reliably read the chipid from hw Example: @@ -19,7 +21,7 @@ Example: ... 
gpu: qcom,kgsl-3d0@4300000 { - compatible = "qcom,adreno-3xx"; + compatible = "qcom,adreno-320.2", "qcom,adreno"; reg = <0x04300000 0x20000>; reg-names = "kgsl_3d0_reg_memory"; interrupts = ; @@ -32,6 +34,5 @@ Example: <&mmcc GFX3D_CLK>, <&mmcc GFX3D_AHB_CLK>, <&mmcc MMSS_IMEM_AHB_CLK>; - qcom,chipid = <0x03020100>; }; }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 8d54cb764f77..5fa51a9abc20 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -189,6 +189,43 @@ static const struct { { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK }, }; +static int find_chipid(struct device *dev, u32 *chipid) +{ + struct device_node *node = dev->of_node; + const char *compat; + int ret; + + /* first search the compat strings for qcom,adreno-XYZ.W: */ + ret = of_property_read_string_index(node, "compatible", 0, &compat); + if (ret == 0) { + unsigned rev, patch; + + if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) { + *chipid = 0; + *chipid |= (rev / 100) << 24; /* core */ + rev %= 100; + *chipid |= (rev / 10) << 16; /* major */ + rev %= 10; + *chipid |= rev << 8; /* minor */ + *chipid |= patch; + + return 0; + } + } + + /* and if that fails, fall back to legacy "qcom,chipid" property: */ + ret = of_property_read_u32(node, "qcom,chipid", chipid); + if (ret) + return ret; + + dev_warn(dev, "Using legacy qcom,chipid binding!\n"); + dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n", + (*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff, + (*chipid >> 8) & 0xff, *chipid & 0xff); + + return 0; +} + static int adreno_bind(struct device *dev, struct device *master, void *data) { static struct adreno_platform_config config = {}; @@ -196,7 +233,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) u32 val; int ret, i; - ret = of_property_read_u32(node, "qcom,chipid", &val); + ret = find_chipid(dev, &val); if (ret) { dev_err(dev, "could not find chipid: %d\n", ret); return ret; @@ -262,6 +299,7 @@ static int adreno_remove(struct platform_device *pdev) } static const struct of_device_id dt_match[] = { + { .compatible = "qcom,adreno" }, { .compatible = "qcom,adreno-3xx" }, /* for backwards compat w/ downstream kgsl DT files: */ { .compatible = "qcom,kgsl-3d0" }, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index e29bb66f55b1..6b85c4195252 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -985,6 +985,7 @@ static int add_display_components(struct device *dev, * as components. */ static const struct of_device_id msm_gpu_match[] = { + { .compatible = "qcom,adreno" }, { .compatible = "qcom,adreno-3xx" }, { .compatible = "qcom,kgsl-3d0" }, { }, -- cgit v1.2.3-55-g7522 From 4e09b95d72e502e200ad0f509fe89fb852add173 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 30 Jan 2017 11:15:14 -0500 Subject: drm/msm: drop quirks binding This was never documented or used in upstream dtb. It is used by downstream bindings from android device kernels. But the quirks are a property of the gpu revision, and as such are redundant to be listed separately in dt. Instead, move the quirks to the device table. 
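As an aside, the "quirks keyed off the GPU revision" idea above can be sketched in a few lines of standalone C. This is only a simplified illustration of the table-lookup pattern; the names (demo_rev, demo_info, DEMO_QUIRK_*) are hypothetical stand-ins for the driver's adreno_rev/adreno_info/ADRENO_QUIRK_* definitions, and the table contents are invented for the example.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical quirk flags, analogous in spirit to the ADRENO_QUIRK_* bits. */
enum demo_quirks {
	DEMO_QUIRK_TWO_PASS_USE_WFI  = 1 << 0,
	DEMO_QUIRK_FAULT_DETECT_MASK = 1 << 1,
};

/* A GPU revision: core.major.minor plus a patch id. */
struct demo_rev {
	uint8_t core, major, minor, patchid;
};

/* Per-revision table entry: quirks live here, not in per-board DT. */
struct demo_info {
	struct demo_rev rev;
	const char *name;
	uint32_t quirks;
};

static const struct demo_info gpulist[] = {
	{ { 3, 0, 6, 0 }, "A306", 0 },
	{ { 5, 3, 0, 2 }, "A530",
	  DEMO_QUIRK_TWO_PASS_USE_WFI | DEMO_QUIRK_FAULT_DETECT_MASK },
};

static bool rev_match(struct demo_rev a, struct demo_rev b)
{
	return a.core == b.core && a.major == b.major &&
	       a.minor == b.minor && a.patchid == b.patchid;
}

/* Look the quirks up from the table instead of reading DT booleans. */
static uint32_t quirks_for(struct demo_rev rev)
{
	for (size_t i = 0; i < sizeof(gpulist) / sizeof(gpulist[0]); i++)
		if (rev_match(gpulist[i].rev, rev))
			return gpulist[i].quirks;
	return 0;
}

int main(void)
{
	struct demo_rev a530_v2 = { 5, 3, 0, 2 };

	printf("quirks = 0x%x\n", (unsigned)quirks_for(a530_v2));
	return 0;
}

The same lookup happens once at bind time, so boards no longer need to repeat revision-specific knowledge in their device trees.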
Signed-off-by: Rob Clark Reviewed-by: Eric Anholt --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 4 ++-- drivers/gpu/drm/msm/adreno/adreno_device.c | 18 ++++-------------- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1 - drivers/gpu/drm/msm/adreno/adreno_gpu.h | 4 +--- 4 files changed, 7 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b8647198c11c..8308bb8166c8 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -327,7 +327,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* Enable RBBM error reporting bits */ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); - if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { + if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { /* * Mask out the activity signals from RB1-3 to avoid false * positives @@ -381,7 +381,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); - if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) + if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 5fa51a9abc20..ece39b16a864 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = { .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, }, { - .rev = ADRENO_REV(5, 3, 0, ANY_ID), + .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", .pm4fw = "a530_pm4.fw", .pfpfw = "a530_pfp.fw", .gmem = SZ_1M, + .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | + ADRENO_QUIRK_FAULT_DETECT_MASK, .init = a5xx_gpu_init, .gpmufw = "a530v3_gpmu.fw2", }, @@ -181,14 +183,6 @@ static void set_gpu_pdev(struct drm_device *dev, priv->gpu_pdev = pdev; } -static const struct { - const char *str; - uint32_t flag; -} quirks[] = { - { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI }, - { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK }, -}; - static int find_chipid(struct device *dev, u32 *chipid) { struct device_node *node = dev->of_node; @@ -231,7 +225,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) static struct adreno_platform_config config = {}; struct device_node *child, *node = dev->of_node; u32 val; - int ret, i; + int ret; ret = find_chipid(dev, &val); if (ret) { @@ -267,10 +261,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) config.slow_rate = 27000000; } - for (i = 0; i < ARRAY_SIZE(quirks); i++) - if (of_property_read_bool(node, quirks[i].str)) - config.quirks |= quirks[i].flag; - dev->platform_data = &config; set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); return 0; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 686a580c711a..c9bd1e6225f4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu->gmem = adreno_gpu->info->gmem; adreno_gpu->revn = adreno_gpu->info->revn; adreno_gpu->rev = config->rev; - adreno_gpu->quirks = config->quirks; gpu->fast_rate = config->fast_rate; gpu->slow_rate = config->slow_rate; 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index e8d55b0306ed..42e444a67630 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -75,6 +75,7 @@ struct adreno_info { const char *pm4fw, *pfpfw; const char *gpmufw; uint32_t gmem; + enum adreno_quirks quirks; struct msm_gpu *(*init)(struct drm_device *dev); }; @@ -116,8 +117,6 @@ struct adreno_gpu { * code (a3xx_gpu.c) and stored in this common location. */ const unsigned int *reg_offsets; - - uint32_t quirks; }; #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) @@ -128,7 +127,6 @@ struct adreno_platform_config { #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; #endif - uint32_t quirks; }; #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000) -- cgit v1.2.3-55-g7522 From 720c3bb80235ffb10129ee930bb394871afbd235 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 30 Jan 2017 11:30:58 -0500 Subject: drm/msm: drop _clk suffix from clk names Suggested by Rob Herring. We still support the old names for compatibility with downstream android dt files. Cc: Rob Herring Signed-off-by: Rob Clark Reviewed-by: Eric Anholt Acked-by: Rob Herring --- Documentation/devicetree/bindings/display/msm/gpu.txt | 12 ++++++------ drivers/gpu/drm/msm/msm_drv.c | 19 +++++++++++++++++++ drivers/gpu/drm/msm/msm_drv.h | 1 + drivers/gpu/drm/msm/msm_gpu.c | 7 +++---- 4 files changed, 29 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt index 7ac3052ca7b5..43fac0fe09bb 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.txt +++ b/Documentation/devicetree/bindings/display/msm/gpu.txt @@ -11,9 +11,9 @@ Required properties: - clocks: device clocks See ../clocks/clock-bindings.txt for details. - clock-names: the following clocks are required: - * "core_clk" - * "iface_clk" - * "mem_iface_clk" + * "core" + * "iface" + * "mem_iface" Example: @@ -27,9 +27,9 @@ Example: interrupts = ; interrupt-names = "kgsl_3d0_irq"; clock-names = - "core_clk", - "iface_clk", - "mem_iface_clk"; + "core", + "iface", + "mem_iface"; clocks = <&mmcc GFX3D_CLK>, <&mmcc GFX3D_AHB_CLK>, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 6b85c4195252..70226eaa5cac 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -91,6 +91,25 @@ module_param(dumpstate, bool, 0600); * Util/helpers: */ +struct clk *msm_clk_get(struct platform_device *pdev, const char *name) +{ + struct clk *clk; + char name2[32]; + + clk = devm_clk_get(&pdev->dev, name); + if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) + return clk; + + snprintf(name2, sizeof(name2), "%s_clk", name); + + clk = devm_clk_get(&pdev->dev, name2); + if (!IS_ERR(clk)) + dev_warn(&pdev->dev, "Using legacy clk name binding. 
Use " + "\"%s\" instead of \"%s\"\n", name, name2); + + return clk; +} + void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname) { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index ed4dad3ca133..5f6f48f4fbd9 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -318,6 +318,7 @@ static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {} #endif +struct clk *msm_clk_get(struct platform_device *pdev, const char *name); void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname); void msm_writel(u32 data, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index b28527a65d09..99e05aacbee1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -560,8 +560,7 @@ static irqreturn_t irq_handler(int irq, void *data) } static const char *clk_names[] = { - "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk", - "mem_iface_clk", "alt_mem_iface_clk", + "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface", }; int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, @@ -625,13 +624,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, /* Acquire clocks: */ for (i = 0; i < ARRAY_SIZE(clk_names); i++) { - gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]); + gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]); DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]); if (IS_ERR(gpu->grp_clks[i])) gpu->grp_clks[i] = NULL; } - gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk"); + gpu->ebi1_clk = msm_clk_get(pdev, "bus"); DBG("ebi1_clk: %p", gpu->ebi1_clk); if (IS_ERR(gpu->ebi1_clk)) gpu->ebi1_clk = NULL; -- cgit v1.2.3-55-g7522 From 7f8036b7f6c6aa635b056fb3fe49ba7062982a5b Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 7 Dec 2016 11:13:53 -0500 Subject: drm/msm: let gpu wire up it's own fault handler Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 17 +++++++++++++++++ drivers/gpu/drm/msm/msm_iommu.c | 7 +++++-- drivers/gpu/drm/msm/msm_mmu.h | 9 +++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 8308bb8166c8..4414cf73735d 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -12,6 +12,7 @@ */ #include "msm_gem.h" +#include "msm_mmu.h" #include "a5xx_gpu.h" extern bool hang_debug; @@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu) return true; } +static int a5xx_fault_handler(void *arg, unsigned long iova, int flags) +{ + struct msm_gpu *gpu = arg; + pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n", + iova, flags, + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7))); + + return -EFAULT; +} + static void a5xx_cp_err_irq(struct msm_gpu *gpu) { u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS); @@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } + if (gpu->aspace) + msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler); + return gpu; } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 
61aaaa1de6eb..7f5779daf5c8 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -24,9 +24,12 @@ struct msm_iommu { }; #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) -static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, +static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg) { + struct msm_iommu *iommu = arg; + if (iommu->base.handler) + return iommu->base.handler(iommu->base.arg, iova, flags); pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); return 0; } @@ -136,7 +139,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) iommu->domain = domain; msm_mmu_init(&iommu->base, dev, &funcs); - iommu_set_fault_handler(domain, msm_fault_handler, dev); + iommu_set_fault_handler(domain, msm_fault_handler, iommu); return &iommu->base; } diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index f85c879e68d2..aa2c5d4580c8 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -33,6 +33,8 @@ struct msm_mmu_funcs { struct msm_mmu { const struct msm_mmu_funcs *funcs; struct device *dev; + int (*handler)(void *arg, unsigned long iova, int flags); + void *arg; }; static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, @@ -45,4 +47,11 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); +static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, + int (*handler)(void *arg, unsigned long iova, int flags)) +{ + mmu->arg = arg; + mmu->handler = handler; +} + #endif /* __MSM_MMU_H__ */ -- cgit v1.2.3-55-g7522 From d90d7026e75bc17cb56fd591f644225575ef6a5f Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 12 Dec 2016 15:10:21 +0530 Subject: drm/msm/mdp5: cfg: Add pipe_cursor block Define the block in advance so that the generated mdp5.xml.h doesn't break build. 
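For context on why the struct member has to exist before the regenerated header lands: the generated mdp5.xml.h resolves a pipe enum to a register base by indexing per-block base arrays in the per-SoC config. The sketch below is a simplified, hypothetical rendering of that lookup; the names (demo_cfg, demo_pipe) and the base offsets are illustrative, not the generated code or real hardware values.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical subset of the per-SoC config: each pipe class carries an
 * array of register base offsets, one per instance. */
struct demo_pipe_block { uint32_t base[4]; };

struct demo_cfg {
	struct demo_pipe_block pipe_vig;
	struct demo_pipe_block pipe_cursor; /* must exist before generated code refers to it */
};

enum demo_pipe { PIPE_VIG0, PIPE_VIG1, PIPE_CURSOR0, PIPE_CURSOR1 };

static const struct demo_cfg cfg = {
	.pipe_vig    = { .base = { 0x04000, 0x06000 } },
	.pipe_cursor = { .base = { 0x34000, 0x36000 } },
};

/* Mirrors the shape of the generated __offset_PIPE() switch. */
static uint32_t pipe_offset(enum demo_pipe p)
{
	switch (p) {
	case PIPE_VIG0:    return cfg.pipe_vig.base[0];
	case PIPE_VIG1:    return cfg.pipe_vig.base[1];
	case PIPE_CURSOR0: return cfg.pipe_cursor.base[0];
	case PIPE_CURSOR1: return cfg.pipe_cursor.base[1];
	}
	return 0;
}

int main(void)
{
	printf("CURSOR0 base: 0x%05x\n", (unsigned)pipe_offset(PIPE_CURSOR0));
	return 0;
}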
Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index 050e1618c836..f681149ad8e3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h @@ -85,6 +85,7 @@ struct mdp5_cfg_hw { struct mdp5_pipe_block pipe_vig; struct mdp5_pipe_block pipe_rgb; struct mdp5_pipe_block pipe_dma; + struct mdp5_pipe_block pipe_cursor; struct mdp5_lm_block lm; struct mdp5_sub_block dspp; struct mdp5_sub_block ad; -- cgit v1.2.3-55-g7522 From f71516bd5841c2464c7ccc847a4ff8ac4e192851 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 16 Jan 2017 11:34:19 +0530 Subject: drm/msm/mdp5: Update generated headers Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 48 +++++++++++++++++---------------- 1 file changed, 25 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 27d5371acee0..e6dfc518d4db 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) - -Copyright (C) 2013-2016 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-01-11 05:19:19) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) +- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55) + +Copyright (C) 2013-2017 by the following authors: - Rob Clark (robclark) - Ilia Mirkin (imirkin) @@ -65,16 +57,19 @@ enum mdp5_intfnum { }; enum mdp5_pipe { - SSPP_VIG0 = 0, - SSPP_VIG1 = 1, - SSPP_VIG2 = 2, - SSPP_RGB0 = 3, - SSPP_RGB1 = 4, - SSPP_RGB2 = 5, - SSPP_DMA0 = 6, - SSPP_DMA1 = 7, - SSPP_VIG3 = 8, - SSPP_RGB3 = 9, + SSPP_NONE = 0, + SSPP_VIG0 = 1, + SSPP_VIG1 = 2, + SSPP_VIG2 = 3, + SSPP_RGB0 = 4, + SSPP_RGB1 = 5, + SSPP_RGB2 = 6, + SSPP_DMA0 = 7, + SSPP_DMA1 = 8, + SSPP_VIG3 = 9, + SSPP_RGB3 = 10, + SSPP_CURSOR0 = 11, + 
SSPP_CURSOR1 = 12, }; enum mdp5_ctl_mode { @@ -532,6 +527,7 @@ static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id va static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) { switch (idx) { + case SSPP_NONE: return (INVALID_IDX(idx)); case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); @@ -542,6 +538,8 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); + case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]); + case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]); default: return INVALID_IDX(idx); } } @@ -1073,6 +1071,10 @@ static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000 #define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 #define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 #define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080 +#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000 static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } #define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 -- cgit v1.2.3-55-g7522 From cd576abfffe43170ffc874065314478a3e14cbe0 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 16 Jan 2017 11:35:36 +0530 Subject: drm/msm/dsi: Update generated headers Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.xml.h | 269 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 256 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 39dff7d5e89b..b3d70ea42891 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) - -Copyright (C) 2013-2015 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 33004 
bytes, from 2017-01-11 05:19:19) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) + +Copyright (C) 2013-2017 by the following authors: - Rob Clark (robclark) - Ilia Mirkin (imirkin) @@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) #define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 +#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000 + +#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004 + +#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008 + +#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c + +#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010 +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0 +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4 +static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK; +} +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0 +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4 +static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK; +} + +#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014 +#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001 + +#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018 +#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004 + +#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c + +#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020 + +#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024 + +#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028 + +#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c + +#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c + +#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044 + +#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048 +#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001 + +#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c +#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f +#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } +#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0 +#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6 +static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } +#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001 + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; } + +static inline uint32_t 
REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007 +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK; +} +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007 +#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff +#define 
DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; } + +#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000 + +#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004 + +#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010 + +#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c + +#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c + +#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040 + +#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044 + +#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048 + +#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c + +#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c + +#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058 + +#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c + +#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070 + +#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074 + +#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078 + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080 + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084 + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088 + +#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c + +#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090 + +#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094 + +#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098 + +#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c + +#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0 + +#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4 + +#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8 + +#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac + +#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4 + +#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8 + +#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc + +#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0 + +#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4 + +#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc + +#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8 + +#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0 + +#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4 + +#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8 + +#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc + +#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100 + +#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104 + +#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108 + #endif /* DSI_XML */ -- cgit v1.2.3-55-g7522 From 0bb70b82c2f91e4667f3c617505235efd6d77e46 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 11 Jan 2017 11:59:56 +0530 Subject: drm/msm/dsi: Set msm_dsi->encoders before initializing bridge The commit "drm: bridge: Link encoder and bridge in core code" updated the drm_bridge_attach() API to also include 
the drm_encoder pointer the bridge attaches to. The func msm_dsi_manager_bridge_init() now relies on the drm_encoder pointer stored in msm_dsi->encoders to pass the encoder to the bridge API. msm_dsi->encoders is unfortunately set after this function is called, resulting in us passing a NULL pointer to drm_brigde_attach. This results in an error and the DSI driver probe fails. Move the initialization of msm_dsi->encoders[] a bit up. Also, don't try to set the encoder's bridge. That's now managed by the bridge API. Cc: Laurent Pinchart Reviewed-by: Laurent Pinchart Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index ec572f8389ed..9593238ccc1b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -205,6 +205,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } + for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) + msm_dsi->encoders[i] = encoders[i]; + msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id); if (IS_ERR(msm_dsi->bridge)) { ret = PTR_ERR(msm_dsi->bridge); @@ -213,11 +216,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { - encoders[i]->bridge = msm_dsi->bridge; - msm_dsi->encoders[i] = encoders[i]; - } - /* * check if the dsi encoder output is connected to a panel or an * external bridge. We create a connector only if we're connected to a -- cgit v1.2.3-55-g7522 From 97e00119534bf3c9f47c4eae0b7dd982ef2de92b Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 16 Jan 2017 09:42:03 +0530 Subject: drm/msm: Construct only one encoder for DSI We currently create 2 encoders for DSI interfaces, one for command mode and other for video mode operation. This isn't needed as we can't really use both the encoders at the same time. It also makes connecting bridges harder. Switch to creating a single encoder. For now, we assume that the encoder is configured only in video mode. Later, the same encoder would be usable in both modes. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.c | 14 +++++--------- drivers/gpu/drm/msm/dsi/dsi.h | 4 ++-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 22 ++++------------------ drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 28 ++++++++++++---------------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 20 ++++++-------------- drivers/gpu/drm/msm/msm_drv.h | 11 +++-------- 6 files changed, 32 insertions(+), 67 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 9593238ccc1b..311c1c1e7d6c 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) if (!msm_dsi || !msm_dsi_device_connected(msm_dsi)) return NULL; - return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ? 
- msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] : - msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID]; + return msm_dsi->encoder; } static int dsi_get_phy(struct msm_dsi *msm_dsi) @@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void) } int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) + struct drm_encoder *encoder) { struct msm_drm_private *priv = dev->dev_private; struct drm_bridge *ext_bridge; - int ret, i; + int ret; - if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] || - !encoders[MSM_DSI_CMD_ENCODER_ID])) + if (WARN_ON(!encoder)) return -EINVAL; msm_dsi->dev = dev; @@ -205,8 +202,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) - msm_dsi->encoders[i] = encoders[i]; + msm_dsi->encoder = encoder; msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id); if (IS_ERR(msm_dsi->bridge)) { diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 03f115f532c2..ddcda8cec9a7 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -73,8 +73,8 @@ struct msm_dsi { struct device *phy_dev; bool phy_enabled; - /* the encoders we are hooked to (outside of dsi block) */ - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]; + /* the encoder we are hooked to (outside of dsi block) */ + struct drm_encoder *encoder; int id; }; diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 2bd8dad76105..19da23d8a0b1 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -540,7 +540,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_connector *connector = NULL; struct dsi_connector *dsi_connector; - int ret, i; + int ret; dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL); if (!dsi_connector) @@ -566,9 +566,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) - drm_mode_connector_attach_encoder(connector, - msm_dsi->encoders[i]); + drm_mode_connector_attach_encoder(connector, msm_dsi->encoder); return connector; } @@ -591,13 +589,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) dsi_bridge->id = id; - /* - * HACK: we may not know the external DSI bridge device's mode - * flags here. We'll get to know them only when the device - * attaches to the dsi host. For now, assume the bridge supports - * DSI video mode - */ - encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; + encoder = msm_dsi->encoder; bridge = &dsi_bridge->base; bridge->funcs = &dsi_mgr_bridge_funcs; @@ -628,13 +620,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) ext_bridge = msm_dsi->external_bridge = msm_dsi_host_get_bridge(msm_dsi->host); - /* - * HACK: we may not know the external DSI bridge device's mode - * flags here. We'll get to know them only when the device - * attaches to the dsi host. 
For now, assume the bridge supports - * DSI video mode - */ - encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; + encoder = msm_dsi->encoder; /* link the internal dsi bridge to the external bridge */ drm_bridge_attach(encoder, ext_bridge, int_bridge); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index b782efd4b95f..94ea963519b2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -260,8 +260,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, struct drm_encoder *encoder; struct drm_connector *connector; struct device_node *panel_node; - struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; - int i, dsi_id; + int dsi_id; int ret; switch (intf_type) { @@ -322,22 +321,19 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, if (!priv->dsi[dsi_id]) break; - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { - dsi_encs[i] = mdp4_dsi_encoder_init(dev); - if (IS_ERR(dsi_encs[i])) { - ret = PTR_ERR(dsi_encs[i]); - dev_err(dev->dev, - "failed to construct DSI encoder: %d\n", - ret); - return ret; - } - - /* TODO: Add DMA_S later? */ - dsi_encs[i]->possible_crtcs = 1 << DMA_P; - priv->encoders[priv->num_encoders++] = dsi_encs[i]; + encoder = mdp4_dsi_encoder_init(dev); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + dev_err(dev->dev, + "failed to construct DSI encoder: %d\n", ret); + return ret; } - ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); + /* TODO: Add DMA_S later? */ + encoder->possible_crtcs = 1 << DMA_P; + priv->encoders[priv->num_encoders++] = encoder; + + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); if (ret) { dev_err(dev->dev, "failed to initialize DSI: %d\n", ret); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index c396d459a9d0..5bad72c9e253 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -369,9 +369,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) case INTF_DSI: { int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); - struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; - enum mdp5_intf_mode mode; - int i; if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { dev_err(dev->dev, "failed to find dsi from intf %d\n", @@ -389,19 +386,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { - mode = (i == MSM_DSI_CMD_ENCODER_ID) ? 
- MDP5_INTF_DSI_MODE_COMMAND : - MDP5_INTF_DSI_MODE_VIDEO; - dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI, - intf_num, mode, ctl); - if (IS_ERR(dsi_encs[i])) { - ret = PTR_ERR(dsi_encs[i]); - break; - } + encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, + MDP5_INTF_DSI_MODE_VIDEO, ctl); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; } - ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); break; } default: diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 5f6f48f4fbd9..cdd7b2f8e977 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -275,16 +275,11 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, struct drm_encoder *encoder); struct msm_dsi; -enum msm_dsi_encoder_id { - MSM_DSI_VIDEO_ENCODER_ID = 0, - MSM_DSI_CMD_ENCODER_ID = 1, - MSM_DSI_ENCODER_NUM = 2 -}; #ifdef CONFIG_DRM_MSM_DSI void __init msm_dsi_register(void); void __exit msm_dsi_unregister(void); int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]); + struct drm_encoder *encoder); #else static inline void __init msm_dsi_register(void) { @@ -293,8 +288,8 @@ static inline void __exit msm_dsi_unregister(void) { } static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, - struct drm_device *dev, - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) + struct drm_device *dev, + struct drm_encoder *encoder) { return -EINVAL; } -- cgit v1.2.3-55-g7522 From 9c9f6f8d472cc9e11f2c0b370685ce78ab7eb2fa Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 5 Dec 2016 15:24:53 +0530 Subject: drm/msm: Set encoder's mode of operation using a kms func The mdp5 kms driver currently sets up multiple encoders per interface (INTF), one for each kind of mode of operation it supports. We create 2 drm_encoders for DSI, one for Video Mode and the other for Command Mode operation. The reason behind this approach could have been that we aren't aware of the DSI device's mode of operation when we create the encoders. This makes things a bit complicated, since these encoders have to be further attached to the same DSI bridge. The easier way out is to create a single encoder, and make the DSI driver set its mode of operation when we know what the DSI device's mode flags are. Start with providing a way to set the mdp5_intf_mode using a kms func that sets the encoder's mode of operation. When constructing a DSI encoder, we set the mode of operation to Video Mode as default. When the DSI device is attached to the host, we probe the DSI mode flags and set the corresponding mode of operation. 
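A minimal sketch of the control flow described above, assuming a single encoder whose mode is chosen once the peripheral's flags become known at attach time: the encoder defaults to video mode and is flipped to command mode through an optional KMS hook. The types and names here (demo_kms_funcs, DEMO_DSI_MODE_VIDEO) are illustrative stand-ins, not the msm_kms or mipi_dsi API.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the DSI device mode flags. */
#define DEMO_DSI_MODE_VIDEO (1u << 0)

struct demo_encoder { bool cmd_mode; };

/* Optional KMS hook: lets the DSI code flip the single encoder between
 * video and command mode after the peripheral attaches. */
struct demo_kms_funcs {
	void (*set_encoder_mode)(struct demo_encoder *enc, bool cmd_mode);
};

static void demo_set_encoder_mode(struct demo_encoder *enc, bool cmd_mode)
{
	enc->cmd_mode = cmd_mode;
}

static const struct demo_kms_funcs kms_funcs = {
	.set_encoder_mode = demo_set_encoder_mode,
};

/* Called when the peripheral attaches and its flags become known:
 * no VIDEO flag means a command-mode panel. */
static void demo_dsi_attach(struct demo_encoder *enc, uint32_t mode_flags)
{
	bool cmd_mode = !(mode_flags & DEMO_DSI_MODE_VIDEO);

	if (kms_funcs.set_encoder_mode)
		kms_funcs.set_encoder_mode(enc, cmd_mode);
}

int main(void)
{
	struct demo_encoder enc = { .cmd_mode = false }; /* video by default */

	demo_dsi_attach(&enc, 0 /* no VIDEO flag */);
	printf("encoder in %s mode\n", enc.cmd_mode ? "command" : "video");
	return 0;
}

The benefit of routing this through a callback is that the DSI code never has to pick between two pre-built encoders; one encoder is reconfigured in place once the real mode flags are available.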
Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.h | 1 + drivers/gpu/drm/msm/dsi/dsi_host.c | 2 ++ drivers/gpu/drm/msm/dsi/dsi_manager.c | 37 +++++++++++++++++++++++++++++ drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 17 +++++++++++++ drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 8 +++++++ drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 1 + drivers/gpu/drm/msm/msm_kms.h | 3 +++ 7 files changed, 69 insertions(+) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index ddcda8cec9a7..81971b3caf3b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -90,6 +90,7 @@ int msm_dsi_manager_phy_enable(int id, void msm_dsi_manager_phy_disable(int id); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); +void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags); int msm_dsi_manager_register(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 3819fdefcae2..eb0903d37e5c 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1482,6 +1482,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host, msm_host->format = dsi->format; msm_host->mode_flags = dsi->mode_flags; + msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags); + /* Some gpios defined in panel DT need to be controlled by host */ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); if (ret) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 19da23d8a0b1..bd4ba00d2524 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -168,6 +168,16 @@ static enum drm_connector_status dsi_mgr_connector_detect( msm_dsi->panel = msm_dsi_host_get_panel( other_dsi->host, NULL); + + if (msm_dsi->panel && kms->funcs->set_encoder_mode) { + bool cmd_mode = !(msm_dsi->device_flags & + MIPI_DSI_MODE_VIDEO); + struct drm_encoder *encoder = + msm_dsi_get_encoder(msm_dsi); + + kms->funcs->set_encoder_mode(kms, encoder, cmd_mode); + } + if (msm_dsi->panel && IS_DUAL_DSI()) drm_object_attach_property(&connector->base, connector->dev->mode_config.tile_property, 0); @@ -773,6 +783,33 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len) return true; } +void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_device *dev = msm_dsi->dev; + struct msm_drm_private *priv; + struct msm_kms *kms; + struct drm_encoder *encoder; + + /* + * drm_device pointer is assigned to msm_dsi only in the modeset_init + * path. If mipi_dsi_attach() happens in DSI driver's probe path + * (generally the case when we're connected to a drm_panel of the type + * mipi_dsi_device), this would be NULL. In such cases, try to set the + * encoder mode in the DSI connector's detect() op. 
+ */ + if (!dev) + return; + + priv = dev->dev_private; + kms = priv->kms; + encoder = msm_dsi_get_encoder(msm_dsi); + + if (encoder && kms->funcs->set_encoder_mode) + if (!(device_flags & MIPI_DSI_MODE_VIDEO)) + kms->funcs->set_encoder_mode(kms, encoder, true); +} + int msm_dsi_manager_register(struct msm_dsi *msm_dsi) { struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index fe0c22230883..1e7c99125fdd 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -342,6 +342,23 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, return 0; } +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + /* TODO: Expand this to set writeback modes too */ + if (cmd_mode) { + WARN_ON(intf->type != INTF_DSI); + intf->mode = MDP5_INTF_DSI_MODE_COMMAND; + } else { + if (intf->type == INTF_DSI) + intf->mode = MDP5_INTF_DSI_MODE_VIDEO; + else + intf->mode = MDP5_INTF_MODE_NONE; + } +} + /* initialize encoder */ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct mdp5_interface *intf, struct mdp5_ctl *ctl) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5bad72c9e253..f73d46756912 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -151,6 +151,13 @@ static int mdp5_set_split_display(struct msm_kms *kms, return mdp5_encoder_set_split_display(encoder, slave_encoder); } +static void mdp5_set_encoder_mode(struct msm_kms *kms, + struct drm_encoder *encoder, + bool cmd_mode) +{ + mdp5_encoder_set_intf_mode(encoder, cmd_mode); +} + static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); @@ -230,6 +237,7 @@ static const struct mdp_kms_funcs kms_funcs = { .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, .set_split_display = mdp5_set_split_display, + .set_encoder_mode = mdp5_set_encoder_mode, .destroy = mdp5_kms_destroy, #ifdef CONFIG_DEBUG_FS .debugfs_init = mdp5_kms_debugfs_init, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index cdfc63d90c7b..3e0baed0a8a7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -246,6 +246,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct mdp5_interface *intf, struct mdp5_ctl *ctl); int mdp5_encoder_set_split_display(struct drm_encoder *encoder, struct drm_encoder *slave_encoder); +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode); int mdp5_encoder_get_linecount(struct drm_encoder *encoder); u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index e470f4cf8f76..117635d2b8c5 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -56,6 +56,9 @@ struct msm_kms_funcs { struct drm_encoder *encoder, struct drm_encoder *slave_encoder, bool is_cmd_mode); + void (*set_encoder_mode)(struct msm_kms *kms, + struct drm_encoder *encoder, + bool cmd_mode); /* cleanup: */ void (*destroy)(struct msm_kms *kms); #ifdef CONFIG_DEBUG_FS -- cgit v1.2.3-55-g7522 From df8a71d2b22ffcecf4df6c07486a8baa5a49870a Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: 
Tue, 6 Dec 2016 09:21:21 +0530 Subject: drm/msm/mdp5: Prepare for merging video and command encoders Rename the mdp5_encoder_* ops for active displays to mdp5_vid_encoder_* ops. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 31 ++++++++++++++++++++++------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 3 ++- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 4 ++-- 3 files changed, 28 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 1e7c99125fdd..63f4135c9b5e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -112,9 +112,9 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = { .destroy = mdp5_encoder_destroy, }; -static void mdp5_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); @@ -221,7 +221,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, mdp5_encoder->ctl); } -static void mdp5_encoder_disable(struct drm_encoder *encoder) +static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); @@ -256,7 +256,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder) mdp5_encoder->enabled = false; } -static void mdp5_encoder_enable(struct drm_encoder *encoder) +static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); @@ -279,6 +279,23 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) mdp5_encoder->enabled = true; } +static void mdp5_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); +} + +static void mdp5_encoder_disable(struct drm_encoder *encoder) +{ + mdp5_vid_encoder_disable(encoder); +} + +static void mdp5_encoder_enable(struct drm_encoder *encoder) +{ + mdp5_vid_encoder_enable(encoder); +} + static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { .mode_set = mdp5_encoder_mode_set, .disable = mdp5_encoder_disable, @@ -303,8 +320,8 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder) return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); } -int mdp5_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder) +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index f73d46756912..b85af0f1c79c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -148,7 +148,8 @@ static int mdp5_set_split_display(struct msm_kms *kms, return mdp5_cmd_encoder_set_split_display(encoder, slave_encoder); else - return mdp5_encoder_set_split_display(encoder, slave_encoder); + 
return mdp5_vid_encoder_set_split_display(encoder, + slave_encoder); } static void mdp5_set_encoder_mode(struct msm_kms *kms, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 3e0baed0a8a7..f2419666c43e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -244,8 +244,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct mdp5_interface *intf, struct mdp5_ctl *ctl); -int mdp5_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder); +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode); int mdp5_encoder_get_linecount(struct drm_encoder *encoder); u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); -- cgit v1.2.3-55-g7522 From b3a94705a0362fad1b25d559c3e0dd4433038f9e Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 16 Jan 2017 11:25:38 +0530 Subject: drm/msm/mdp5: Create single encoder per interface (INTF) For the DSI interfaces, the mdp5_kms core creates 2 encoders for video and command modes. Create only a single encoder per interface. When creating the encoder, set the interface type to MDP5_INTF_MODE_NONE. It's the bridge (DSI/HDMI/eDP) driver's responsibility to set a different interface type. It can use the the kms func op set_encoder_mode to change the mode of operation, which in turn would configure the interface type for the INTF. In mdp5_cmd_encoder.c, we remove the redundant code, and make the commmand mode funcs as helpers that are used in mdp5_encoder.c Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 135 +++--------------------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 35 +++--- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 20 ++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 32 ++++-- 4 files changed, 66 insertions(+), 156 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index c627ab6d0061..df1c8adec3f3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -16,16 +16,6 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" -struct mdp5_cmd_encoder { - struct drm_encoder base; - struct mdp5_interface intf; - bool enabled; - uint32_t bsc; - - struct mdp5_ctl *ctl; -}; -#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base) - static struct mdp5_kms *get_kms(struct drm_encoder *encoder) { struct msm_drm_private *priv = encoder->dev->dev_private; @@ -36,47 +26,8 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) #include #include #include -#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ - { \ - .src = MSM_BUS_MASTER_MDP_PORT0, \ - .dst = MSM_BUS_SLAVE_EBI_CH0, \ - .ab = (ab_val), \ - .ib = (ib_val), \ - } - -static struct msm_bus_vectors mdp_bus_vectors[] = { - MDP_BUS_VECTOR_ENTRY(0, 0), - MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), -}; -static struct msm_bus_paths mdp_bus_usecases[] = { { - .num_paths = 1, - .vectors = &mdp_bus_vectors[0], -}, { - .num_paths = 1, - .vectors = &mdp_bus_vectors[1], -} }; -static struct msm_bus_scale_pdata mdp_bus_scale_table = { - .usecase = mdp_bus_usecases, - .num_usecases = ARRAY_SIZE(mdp_bus_usecases), - .name = "mdss_mdp", -}; - 
-static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) -{ - mdp5_cmd_enc->bsc = msm_bus_scale_register_client( - &mdp_bus_scale_table); - DBG("bus scale client: %08x", mdp5_cmd_enc->bsc); -} - -static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) -{ - if (mdp5_cmd_enc->bsc) { - msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc); - mdp5_cmd_enc->bsc = 0; - } -} -static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) +static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) { if (mdp5_cmd_enc->bsc) { DBG("set bus scaling: %d", idx); @@ -89,14 +40,12 @@ static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) } } #else -static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} -static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} -static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {} +static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {} #endif #define VSYNC_CLK_RATE 19200000 static int pingpong_tearcheck_setup(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct mdp5_kms *mdp5_kms = get_kms(encoder); struct device *dev = encoder->dev->dev; @@ -176,23 +125,11 @@ static void pingpong_tearcheck_disable(struct drm_encoder *encoder) clk_disable_unprepare(mdp5_kms->vsync_clk); } -static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); - bs_fini(mdp5_cmd_enc); - drm_encoder_cleanup(encoder); - kfree(mdp5_cmd_enc); -} - -static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = { - .destroy = mdp5_cmd_encoder_destroy, -}; - -static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); mode = adjusted_mode; @@ -209,9 +146,9 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, mdp5_cmd_enc->ctl); } -static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; @@ -228,9 +165,9 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) mdp5_cmd_enc->enabled = false; } -static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; @@ -248,16 +185,10 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) mdp5_cmd_enc->enabled = true; } -static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = { - .mode_set = mdp5_cmd_encoder_mode_set, - .disable = mdp5_cmd_encoder_disable, - .enable = mdp5_cmd_encoder_enable, -}; - int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder) + struct drm_encoder *slave_encoder) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = 
to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms; int intf_num; u32 data = 0; @@ -292,43 +223,3 @@ int mdp5_cmd_encoder_set_split_display, return 0; } - -/* initialize command mode encoder */ -struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl) -{ - struct drm_encoder *encoder = NULL; - struct mdp5_cmd_encoder *mdp5_cmd_enc; - int ret; - - if (WARN_ON((intf->type != INTF_DSI) && - (intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) { - ret = -EINVAL; - goto fail; - } - - mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL); - if (!mdp5_cmd_enc) { - ret = -ENOMEM; - goto fail; - } - - memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf)); - encoder = &mdp5_cmd_enc->base; - mdp5_cmd_enc->ctl = ctl; - - drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); - - drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs); - - bs_init(mdp5_cmd_enc); - - return encoder; - -fail: - if (encoder) - mdp5_cmd_encoder_destroy(encoder); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 63f4135c9b5e..80fa482ae8ed 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -21,17 +21,6 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" -struct mdp5_encoder { - struct drm_encoder base; - struct mdp5_interface intf; - spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ - bool enabled; - uint32_t bsc; - - struct mdp5_ctl *ctl; -}; -#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) - static struct mdp5_kms *get_kms(struct drm_encoder *encoder) { struct msm_drm_private *priv = encoder->dev->dev_private; @@ -283,17 +272,35 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); + else + mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); } static void mdp5_encoder_disable(struct drm_encoder *encoder) { - mdp5_vid_encoder_disable(encoder); + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_disable(encoder); + else + mdp5_vid_encoder_disable(encoder); } static void mdp5_encoder_enable(struct drm_encoder *encoder) { - mdp5_vid_encoder_enable(encoder); + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_enable(encoder); + else + mdp5_vid_encoder_enable(encoder); } static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index f73d46756912..9794cad131cd 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -276,7 +276,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, enum mdp5_intf_type
intf_type, int intf_num, - enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl) + struct mdp5_ctl *ctl) { struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; @@ -284,15 +284,10 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, struct mdp5_interface intf = { .num = intf_num, .type = intf_type, - .mode = intf_mode, + .mode = MDP5_INTF_MODE_NONE, }; - if ((intf_type == INTF_DSI) && - (intf_mode == MDP5_INTF_DSI_MODE_COMMAND)) - encoder = mdp5_cmd_encoder_init(dev, &intf, ctl); - else - encoder = mdp5_encoder_init(dev, &intf, ctl); - + encoder = mdp5_encoder_init(dev, &intf, ctl); if (IS_ERR(encoder)) { dev_err(dev->dev, "failed to construct encoder\n"); return encoder; @@ -347,8 +342,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, - MDP5_INTF_MODE_NONE, ctl); + encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -366,8 +360,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, - MDP5_INTF_MODE_NONE, ctl); + encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -395,8 +388,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, - MDP5_INTF_DSI_MODE_VIDEO, ctl); + encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index f2419666c43e..40ebc5c50875 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -126,6 +126,17 @@ struct mdp5_interface { enum mdp5_intf_mode mode; }; +struct mdp5_encoder { + struct drm_encoder base; + struct mdp5_interface intf; + spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ + bool enabled; + uint32_t bsc; + + struct mdp5_ctl *ctl; +}; +#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) + static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) { msm_writel(data, mdp5_kms->mmio + reg); @@ -251,15 +262,24 @@ int mdp5_encoder_get_linecount(struct drm_encoder *encoder); u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); #ifdef CONFIG_DRM_MSM_DSI -struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl); +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void mdp5_cmd_encoder_disable(struct drm_encoder *encoder); +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder); int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder); + struct drm_encoder *slave_encoder); #else -static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl) +static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} +static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +{ +} +static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) { - return ERR_PTR(-EINVAL); } static inline 
int mdp5_cmd_encoder_set_split_display( struct drm_encoder *encoder, struct drm_encoder *slave_encoder) -- cgit v1.2.3-55-g7522 From 710a651fdd6958c8530c72c62970b785179fce9f Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Tue, 6 Dec 2016 12:12:59 +0530 Subject: drm/msm/mdp5: cfg: Change count to unsigned int Count can't be negative. Changing to uint will also prevent future warnings. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index f681149ad8e3..b1c7daaede86 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h @@ -32,7 +32,7 @@ extern const struct mdp5_cfg_hw *mdp5_cfg; typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); #define MDP5_SUB_BLOCK_DEFINITION \ - int count; \ + unsigned int count; \ uint32_t base[MAX_BASES] struct mdp5_sub_block { -- cgit v1.2.3-55-g7522 From e5366ffe50cc1fff906d7bb45a670861a2174395 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Tue, 6 Dec 2016 12:15:54 +0530 Subject: drm/msm/mdp5: Create only as many CRTCs as we need We currently create CRTCs equal to the # of Layer Mixer blocks we have on the MDP5 HW. This number is generally more than the # of encoders (INTFs) we have in the MDSS HW. The number of encoders connected to displays on the platform (as described by DT) would be even smaller. Create only N drm_crtcs, where N is the number of drm_encoders successfully registered. To do this, we call modeset_init_intf() before we init the drm_crtcs and drm_planes. Because of this change, setting encoder->possible_crtcs needs to be moved from construct_encoder() to a later point when we know how many CRTCs we have. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 39 ++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 9794cad131cd..ecfa38949aea 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -293,7 +293,6 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, return encoder; } - encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; priv->encoders[priv->num_encoders++] = encoder; return encoder; @@ -411,16 +410,35 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; const struct mdp5_cfg_hw *hw_cfg; + unsigned int num_crtcs; int i, ret; hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* Construct planes equaling the number of hw pipes, and CRTCs * for the N layer-mixers (LM). The first N planes become primary + /* + * Construct encoders and modeset initialize connector devices + * for each external display interface.
+ */ + for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { + ret = modeset_init_intf(mdp5_kms, i); + if (ret) + goto fail; + } + + /* + * We should ideally have less number of encoders (set up by parsing + * the MDP5 interfaces) than the number of layer mixers present in HW, + * but let's be safe here anyway + */ + num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count); + + /* + * Construct planes equaling the number of hw pipes, and CRTCs for the + * N encoders set up by the driver. The first N planes become primary * planes for the CRTCs, with the remainder as overlay planes: */ for (i = 0; i < mdp5_kms->num_hwpipes; i++) { - bool primary = i < mdp5_cfg->lm.count; + bool primary = i < num_crtcs; struct drm_plane *plane; struct drm_crtc *crtc; @@ -444,13 +462,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) priv->crtcs[priv->num_crtcs++] = crtc; } - /* Construct encoders and modeset initialize connector devices - * for each external display interface. + /* + * Now that we know the number of crtcs we've created, set the possible + * crtcs for the encoders */ - for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { - ret = modeset_init_intf(mdp5_kms, i); - if (ret) - goto fail; + for (i = 0; i < priv->num_encoders; i++) { + struct drm_encoder *encoder = priv->encoders[i]; + + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; } return 0; -- cgit v1.2.3-55-g7522 From 106f9727dda49e8047d56b4916e912cff30d4bff Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 12 Dec 2016 15:17:56 +0530 Subject: drm/msm/mdp5: Prepare CRTC/LM for empty stages Use SSPP_NONE in mdp5_plane_pipe() if there is no hwpipe allocated for the drm_plane. Returning '0' means we are returning VIG0 pipe. Also, use the mdp5_pipe enum to pass around the stage array. Initialize the stage to SSPP_NONE by default. We do the above because 1) Cursor plane has to be staged at the topmost blender of the LM, which can result in empty stages in between 2) In the future, when we support multiple LMs per CRTC, we could have stages which don't have any pipe assigned to them.
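As an illustration of the stage-array idea described above (a standalone sketch with invented EX_* names, not the driver code), pre-filling every slot with a "no pipe" marker lets later code walk all hardware stages and simply skip the holes that an always-topmost cursor can leave behind:

/* Sketch only: shows why SSPP_NONE-initialized stage arrays tolerate gaps. */
#include <stdio.h>

enum example_pipe { EX_SSPP_NONE = 0, EX_SSPP_RGB0, EX_SSPP_VIG0, EX_SSPP_CURSOR0 };
#define EX_STAGE_MAX 7

int main(void)
{
	enum example_pipe stage[EX_STAGE_MAX + 1] = { EX_SSPP_NONE };
	int i;

	stage[0] = EX_SSPP_RGB0;               /* base layer */
	stage[EX_STAGE_MAX] = EX_SSPP_CURSOR0; /* cursor at the topmost blender */

	/* stages 1..6 stay EX_SSPP_NONE and are skipped, not misread as VIG0 */
	for (i = 0; i <= EX_STAGE_MAX; i++) {
		if (stage[i] == EX_SSPP_NONE)
			continue;
		printf("stage %d -> pipe %d\n", i, stage[i]);
	}
	return 0;
}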
Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 2 +- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 4 ++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 4 ++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 1ce8a01a5a28..7a47209b0c6f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -195,7 +195,7 @@ static void blend_setup(struct drm_crtc *crtc) uint32_t lm = mdp5_crtc->lm; uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; - uint8_t stage[STAGE_MAX + 1]; + enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE }; int i, plane_cnt = 0; #define blender(stage) ((stage) - STAGE0) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index d021edc3b307..ab339ce7425a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -351,8 +351,8 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, } } -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, - u32 ctl_blend_op_flags) +int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, + u32 ctl_blend_op_flags) { unsigned long flags; u32 blend_cfg = 0, blend_ext_cfg = 0; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index 96148c6f863c..fda00d33e4db 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -56,8 +56,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) */ #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, - u32 ctl_blend_op_flags); +int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, + u32 ctl_blend_op_flags); /** * mdp_ctl_flush_mask...() - Register FLUSH masks diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index b9fb111d3428..eb8dc7c36419 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -823,7 +823,7 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); if (WARN_ON(!pstate->hwpipe)) - return 0; + return SSPP_NONE; return pstate->hwpipe->pipe; } -- cgit v1.2.3-55-g7522 From 3b6acf144053794ca049a025b3579a68a6f4c3e5 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 16 Jan 2017 11:46:17 +0530 Subject: drm/msm/mdp5: Use plane helpers to configure src/dst rectangles The MDP5 plane's atomic_check op doesn't perform clipping tests. This didn't hurt us much in the past, but clipping becomes important with cursor planes. Use drm_plane_helper_check_state, the way rockchip/intel/mtk drivers already do. Use these drivers as reference. Clipping requires knowledge of the crtc width and height. This requires us to call drm_atomic_helper_check_modeset before drm_atomic_helper_check_planes in the driver's atomic_check op, because check_modeset will populate the mode for the crtc, which is needed to populate the clip rectangle.
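In outline, the ordering argued for in the previous paragraph looks like the sketch below (a condensed view of the msm_atomic_check change further down, using the standard DRM atomic helpers; not a drop-in function):

#include <drm/drm_atomic_helper.h>

static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	/* runs first so crtc_state->adjusted_mode is valid for plane checks */
	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	/* plane ->atomic_check can now build its clip rect from the mode */
	return drm_atomic_helper_check_planes(dev, state);
}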
We update the plane_enabled(state) local helper to use state->visible, since state->visible and 'state->fb && state->crtc' represent the same thing. One issue with the existing code is that we don't have a way to disable the plane when it's completely clipped out. Until there isn't an update on the crtc (which would de-stage the plane), we would still see the plane in its last 'visible' configuration. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 57 ++++++++++++++++++++++++------- drivers/gpu/drm/msm/msm_atomic.c | 21 ++++++++---- 2 files changed, 59 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index eb8dc7c36419..939991d5f346 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -29,10 +29,7 @@ struct mdp5_plane { static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h); + struct drm_rect *src, struct drm_rect *dest); static void set_scanout_locked(struct drm_plane *plane, struct drm_framebuffer *fb); @@ -45,7 +42,7 @@ static struct mdp5_kms *get_kms(struct drm_plane *plane) static bool plane_enabled(struct drm_plane_state *state) { - return state->fb && state->crtc; + return state->visible; } static void mdp5_plane_destroy(struct drm_plane *plane) @@ -272,6 +269,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, msm_framebuffer_cleanup(fb, mdp5_kms->id); } +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) static int mdp5_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { @@ -281,10 +279,19 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, bool new_hwpipe = false; uint32_t max_width, max_height; uint32_t caps = 0; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_rect clip; + int min_scale, max_scale; + int ret; DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); + crtc = state->crtc ? 
state->crtc : plane->state->crtc; + if (!crtc) + return 0; + max_width = config->hw->lm.max_width << 16; max_height = config->hw->lm.max_height << 16; @@ -296,6 +303,22 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return -ERANGE; } + crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + clip.x1 = 0; + clip.y1 = 0; + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + min_scale = FRAC_16_16(1, 8); + max_scale = FRAC_16_16(8, 1); + + ret = drm_plane_helper_check_state(state, &clip, min_scale, + max_scale, true, true); + if (ret) + return ret; + if (plane_enabled(state)) { unsigned int rotation; const struct mdp_format *format; @@ -368,10 +391,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, ret = mdp5_plane_mode_set(plane, state->crtc, state->fb, - state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - state->src_x, state->src_y, - state->src_w, state->src_h); + &state->src, &state->dst); /* atomic_check should have ensured that this doesn't fail */ WARN_ON(ret < 0); } @@ -664,10 +684,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) + struct drm_rect *src, struct drm_rect *dest) { struct drm_plane_state *pstate = plane->state; struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; @@ -683,6 +700,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, uint32_t pix_format; unsigned int rotation; bool vflip, hflip; + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + uint32_t src_x, src_y; + uint32_t src_w, src_h; unsigned long flags; int ret; @@ -695,6 +716,16 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, format = to_mdp_format(msm_framebuffer_format(fb)); pix_format = format->base.pixel_format; + src_x = src->x1; + src_y = src->y1; + src_w = drm_rect_width(src); + src_h = drm_rect_height(src); + + crtc_x = dest->x1; + crtc_y = dest->y1; + crtc_w = drm_rect_width(dest); + crtc_h = drm_rect_height(dest); + /* src values are in Q16 fixed point, convert to integer: */ src_x = src_x >> 16; src_y = src_y >> 16; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 30b5d23e53b4..6924fa28f04f 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -151,20 +151,29 @@ static void commit_worker(struct work_struct *work) complete_commit(container_of(work, struct msm_commit, work), true); } +/* + * this func is identical to the drm_atomic_helper_check, but we keep this + * because we might eventually need to have a more finegrained check + * sequence without using the atomic helpers. + * + * In the past, we first called drm_atomic_helper_check_planes, and then + * drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's + * ->atomic_check could update ->mode_changed for pixel format changes. + * This, however isn't needed now because if there is a pixel format change, + * we just assign a new hwpipe for it with a new SMP allocation. We might + * eventually hit a condition where we would need to do a full modeset if + * we run out of planes. There, we'd probably need to set mode_changed. 
+ */ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { int ret; - /* - * msm ->atomic_check can update ->mode_changed for pixel format - * changes, hence must be run before we check the modeset changes. - */ - ret = drm_atomic_helper_check_planes(dev, state); + ret = drm_atomic_helper_check_modeset(dev, state); if (ret) return ret; - ret = drm_atomic_helper_check_modeset(dev, state); + ret = drm_atomic_helper_check_planes(dev, state); if (ret) return ret; -- cgit v1.2.3-55-g7522 From 829200ac913809ff554f53bc9091b8641ac1dd26 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 19 Dec 2016 13:34:11 +0530 Subject: drm/msm/mdp5: Configure COLOR3_OUT propagation In MDP5 Layer Mixer HW, the blender output is only the blended color components (i.e R, G and B, or COLOR0/1/2 in MDP5 HW terminology). This is fed to the BG input of the next blender. We also need to provide an alpha (COLOR3) value for the BG input at the next stage. This is configured via using the REG_MDP5_LM_BLEND_COLOR_OUT register. For each stage, we can propagate either the BG or FG alpha to the next stage. The approach taken by the driver is to propagate FG alpha, if the plane staged on that blender has an alpha. If it doesn't, we try to propagate the base layer's alpha. This is borrowed from downstream MDP5 kernel driver. Without this, we don't see any cursor plane content. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 7a47209b0c6f..fec966293f9a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -177,6 +177,21 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc) kfree(mdp5_crtc); } +static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) +{ + switch (stage) { + case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA; + case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA; + case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA; + case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA; + case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA; + case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA; + case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA; + default: + return 0; + } +} + /* * blend_setup() - blend all the planes of a CRTC * @@ -197,6 +212,8 @@ static void blend_setup(struct drm_crtc *crtc) unsigned long flags; enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE }; int i, plane_cnt = 0; + bool bg_alpha_enabled = false; + u32 mixer_op_mode = 0; #define blender(stage) ((stage) - STAGE0) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); @@ -218,6 +235,11 @@ static void blend_setup(struct drm_crtc *crtc) if (!pstates[STAGE_BASE]) { ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; DBG("Border Color is enabled"); + } else if (plane_cnt) { + format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb)); + + if (format->alpha_enable) + bg_alpha_enabled = true; } /* The reset for blending */ @@ -232,6 +254,12 @@ static void blend_setup(struct drm_crtc *crtc) MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); fg_alpha = pstates[i]->alpha; bg_alpha = 0xFF - pstates[i]->alpha; + + if (!format->alpha_enable && bg_alpha_enabled) + mixer_op_mode = 0; + else + mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i); + DBG("Stage %d 
fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); if (format->alpha_enable && pstates[i]->premultiplied) { @@ -268,6 +296,8 @@ static void blend_setup(struct drm_crtc *crtc) blender(i)), bg_alpha); } + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode); + mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); out: -- cgit v1.2.3-55-g7522 From 5798c8e0d3e09be71d2e937b985b17d89f0b7535 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 16 Jan 2017 11:57:04 +0530 Subject: drm/msm/mdp5: Misc cursor plane bits These are various changes added in preparation for cursor planes: - Add a pipe_cursor block for 8x96 in mdp5_cfg. - Add a new pipe CAP called MDP_PIPE_CAP_CURSOR. Use this to ensure we assign a cursor SSPP for a drm_plane with type DRM_PLANE_TYPE_CURSOR. - Update mdp5_ctl_blend_mask/ext_blend_mask funcs to incorporate cursor SSPPs. - In mdp5_ctl_blend, iterate through MAX_STAGES instead of stage_cnt, we need to do this because we can now have empty stages in between. - In mdp5_crtc_atomic_check, make sure that the cursor plane has the highest zorder, and stage the cursor plane to the maximum stage # present on the HW. - Create drm_crtc_funcs that doesn't try to implement cursors using the older LM cursor HW. - Pass drm_plane_type in mdp5_plane_init instead of a bool telling whether plane is primary or not. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 10 +++++++++ drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 34 +++++++++++++++++++++++++++---- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 10 +++++++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 10 +++++++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 7 +++++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c | 8 ++++++++ drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 8 +++++--- drivers/gpu/drm/msm/mdp/mdp_kms.h | 1 + 8 files changed, 75 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index 618b2ffed9b4..34ab553f6897 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -421,6 +421,16 @@ const struct mdp5_cfg_hw msm8x96_config = { MDP_PIPE_CAP_SW_PIX_EXT | 0, }, + .pipe_cursor = { + .count = 2, + .base = { 0x34000, 0x36000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + .lm = { .count = 6, .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index fec966293f9a..84fcb6e3a176 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -400,6 +400,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; const struct drm_plane_state *pstate; + bool cursor_plane = false; int cnt = 0, base = 0, i; DBG("%s: check", crtc->name); @@ -409,6 +410,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, pstates[cnt].state = to_mdp5_plane_state(pstate); cnt++; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + cursor_plane = true; } /* assign a stage based on sorted zpos property */ @@ -420,6 +424,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) base++; + /* trigger a warning if cursor isn't the highest zorder */ + WARN_ON(cursor_plane && + 
(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); + /* verify that there are not too many planes attached to crtc * and that we don't have conflicting mixer stages: */ @@ -431,7 +439,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, } for (i = 0; i < cnt; i++) { - pstates[i].state->stage = STAGE_BASE + i + base; + if (cursor_plane && (i == (cnt - 1))) + pstates[i].state->stage = hw_cfg->lm.nb_stages; + else + pstates[i].state->stage = STAGE_BASE + i + base; DBG("%s: assign pipe %s on stage=%d", crtc->name, pstates[i].plane->name, pstates[i].state->stage); @@ -642,6 +653,16 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { .cursor_move = mdp5_crtc_cursor_move, }; +static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = mdp5_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .set_property = drm_atomic_helper_crtc_set_property, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { .mode_set_nofb = mdp5_crtc_mode_set_nofb, .disable = mdp5_crtc_disable, @@ -775,7 +796,8 @@ void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) /* initialize crtc */ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, - struct drm_plane *plane, int id) + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id) { struct drm_crtc *crtc = NULL; struct mdp5_crtc *mdp5_crtc; @@ -796,8 +818,12 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs, - NULL); + if (cursor_plane) + drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, + &mdp5_crtc_no_lm_cursor_funcs, NULL); + else + drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &mdp5_crtc_funcs, NULL); drm_flip_work_init(&mdp5_crtc->unref_cursor_work, "unref cursor", unref_cursor_worker); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index ab339ce7425a..8b93f7e13200 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -326,6 +326,8 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); + case SSPP_CURSOR0: + case SSPP_CURSOR1: default: return 0; } } @@ -333,7 +335,7 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, enum mdp_mixer_stage_id stage) { - if (stage < STAGE6) + if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1)) return 0; switch (pipe) { @@ -347,6 +349,8 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; + case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); + case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); default: return 0; } } @@ -365,7 +369,7 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, start_stage = STAGE_BASE; } - for (i = start_stage; i < start_stage + stage_cnt; i++) { + for (i = start_stage; stage_cnt 
&& i <= STAGE_MAX; i++) { blend_cfg |= mdp_ctl_blend_mask(stage[i], i); blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); } @@ -422,6 +426,8 @@ u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; + case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0; + case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1; default: return 0; } } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index ecfa38949aea..70a1950de8ae 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -441,8 +441,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) bool primary = i < num_crtcs; struct drm_plane *plane; struct drm_crtc *crtc; + enum drm_plane_type type; - plane = mdp5_plane_init(dev, primary); + if (primary) + type = DRM_PLANE_TYPE_PRIMARY; + else + type = DRM_PLANE_TYPE_OVERLAY; + + plane = mdp5_plane_init(dev, type); if (IS_ERR(plane)) { ret = PTR_ERR(plane); dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); @@ -453,7 +459,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) if (!primary) continue; - crtc = mdp5_crtc_init(dev, plane, i); + crtc = mdp5_crtc_init(dev, plane, NULL, i); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 40ebc5c50875..0396e8adb693 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -167,6 +167,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe) NAME(RGB0), NAME(RGB1), NAME(RGB2), NAME(DMA0), NAME(DMA1), NAME(VIG3), NAME(RGB3), + NAME(CURSOR0), NAME(CURSOR1), #undef NAME }; return names[pipe]; @@ -242,7 +243,8 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); -struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); +struct drm_plane *mdp5_plane_init(struct drm_device *dev, + enum drm_plane_type type); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); @@ -251,7 +253,8 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, struct mdp5_interface *intf, struct mdp5_ctl *ctl); void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, - struct drm_plane *plane, int id); + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id); struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct mdp5_interface *intf, struct mdp5_ctl *ctl); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c index 1ae9dc8d260d..35c4dabb0c0c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c @@ -53,6 +53,14 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s, if (caps & ~cur->caps) continue; + /* + * don't assign a cursor pipe to a plane that isn't going to + * be used as a cursor + */ + if (cur->caps & MDP_PIPE_CAP_CURSOR && + plane->type != DRM_PLANE_TYPE_CURSOR) + continue; + /* possible candidate, take the one with the * fewest unneeded caps bits set: */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 939991d5f346..7409e959d810 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ 
b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -344,6 +344,9 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, if (rotation & DRM_REFLECT_Y) caps |= MDP_PIPE_CAP_VFLIP; + if (plane->type == DRM_PLANE_TYPE_CURSOR) + caps |= MDP_PIPE_CAP_CURSOR; + /* (re)allocate hw pipe if we don't have one or caps-mismatch: */ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) new_hwpipe = true; @@ -870,12 +873,12 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) } /* initialize plane */ -struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) +struct drm_plane *mdp5_plane_init(struct drm_device *dev, + enum drm_plane_type type) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; int ret; - enum drm_plane_type type; mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); if (!mdp5_plane) { @@ -888,7 +891,6 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, ARRAY_SIZE(mdp5_plane->formats), false); - type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, mdp5_plane->formats, mdp5_plane->nformats, type, NULL); diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index 303130320748..7574cdfef418 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -112,6 +112,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); #define MDP_PIPE_CAP_CSC BIT(3) #define MDP_PIPE_CAP_DECIMATION BIT(4) #define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) +#define MDP_PIPE_CAP_CURSOR BIT(6) static inline bool pipe_supports_yuv(uint32_t pipe_caps) { -- cgit v1.2.3-55-g7522 From bff8fba48b52d77b39084743b6209b6442a9e622 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Fri, 16 Dec 2016 12:00:30 +0530 Subject: drm/msm/mdp5: Add cursor planes Register cursor drm_planes. The loop in modeset_init that inits the planes and crtcs has to be refactored a bit. We first iterate all the hwpipes to find the cursor planes. Then, we loop again to create crtcs. In msm_atomic_wait_for_commit_done, remove the check which bypasses waiting for vsyncs if state->legacy_cursor_updates is true. We will later create a fast path for cursor position changes in the cursor plane's update_plane func that doesn't go via the regular atomic commit path. For rest of cursor related updates, we will have to wait for vsyncs, so ignore the legacy_cursor_updates flag. 
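The plane/CRTC pairing described above can be pictured with a short sketch (illustrative only; example_modeset_init and MAX_CRTCS_EX are invented names, while the four-argument mdp5_crtc_init() comes from the earlier patch in this series):

#include <linux/err.h>

#define MAX_CRTCS_EX 4

static int example_modeset_init(struct drm_device *dev,
				struct drm_plane *primary[MAX_CRTCS_EX],
				struct drm_plane *cursor[MAX_CRTCS_EX],
				unsigned int num_crtcs)
{
	unsigned int i;

	for (i = 0; i < num_crtcs; i++) {
		/* cursor[i] may be NULL if no cursor SSPP was left over */
		struct drm_crtc *crtc =
			mdp5_crtc_init(dev, primary[i], cursor[i], i);

		if (IS_ERR(crtc))
			return PTR_ERR(crtc);
	}

	return 0;
}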
Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 33 ++++++++++++++++++++++++++------- drivers/gpu/drm/msm/msm_atomic.c | 5 ----- 2 files changed, 26 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 70a1950de8ae..3eb0749223d9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -411,7 +411,9 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) struct msm_drm_private *priv = dev->dev_private; const struct mdp5_cfg_hw *hw_cfg; unsigned int num_crtcs; - int i, ret; + int i, ret, pi = 0, ci = 0; + struct drm_plane *primary[MAX_BASES] = { NULL }; + struct drm_plane *cursor[MAX_BASES] = { NULL }; hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); @@ -438,13 +440,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) * planes for the CRTCs, with the remainder as overlay planes: */ for (i = 0; i < mdp5_kms->num_hwpipes; i++) { - bool primary = i < num_crtcs; + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; struct drm_plane *plane; - struct drm_crtc *crtc; enum drm_plane_type type; - if (primary) + if (i < num_crtcs) type = DRM_PLANE_TYPE_PRIMARY; + else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) + type = DRM_PLANE_TYPE_CURSOR; else type = DRM_PLANE_TYPE_OVERLAY; @@ -456,10 +459,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) } priv->planes[priv->num_planes++] = plane; - if (!primary) - continue; + if (type == DRM_PLANE_TYPE_PRIMARY) + primary[pi++] = plane; + if (type == DRM_PLANE_TYPE_CURSOR) + cursor[ci++] = plane; + } + + for (i = 0; i < num_crtcs; i++) { + struct drm_crtc *crtc; - crtc = mdp5_crtc_init(dev, plane, NULL, i); + crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); @@ -791,6 +800,9 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms) static const enum mdp5_pipe dma_planes[] = { SSPP_DMA0, SSPP_DMA1, }; + static const enum mdp5_pipe cursor_planes[] = { + SSPP_CURSOR0, SSPP_CURSOR1, + }; const struct mdp5_cfg_hw *hw_cfg; int ret; @@ -814,6 +826,13 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms) if (ret) return ret; + /* Construct cursor pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, + cursor_planes, hw_cfg->pipe_cursor.base, + hw_cfg->pipe_cursor.caps); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 6924fa28f04f..9633a68b14d7 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -93,11 +93,6 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, if (!crtc->state->enable) continue; - /* Legacy cursor ioctls are completely unsynced, and userspace - * relies on that (by doing tons of cursor updates). */ - if (old_state->legacy_cursor_update) - continue; - kms->funcs->wait_for_crtc_commit_done(kms, crtc); } } -- cgit v1.2.3-55-g7522 From 9142364e4666835e53586c21fe4e8983a8b09bb2 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 16 Jan 2017 12:16:34 +0530 Subject: drm/msm/mdp5: Refactor mdp5_plane_atomic_check In mdp5_plane_atomic_check, we get crtc_state from drm_plane_state. Later, for cursor planes, we'll populate the update_plane() func that takes a fast asynchronous path to implement cursor movements. 
There, we would need to call a similar atomic_check func to validate the plane state, but crtc_state would need to be derived differently. Refactor mdp5_plane_atomic_check to mdp5_plane_atomic_check_with_state such that the latter takes crtc_state as an argument. This is similar to what the intel driver has done for async cursor updates. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 32 +++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 7409e959d810..504201b710d9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -270,17 +270,16 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, } #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) -static int mdp5_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) +static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, + struct drm_plane_state *state) { struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); + struct drm_plane *plane = state->plane; struct drm_plane_state *old_state = plane->state; struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); bool new_hwpipe = false; uint32_t max_width, max_height; uint32_t caps = 0; - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; struct drm_rect clip; int min_scale, max_scale; int ret; @@ -288,10 +287,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); - crtc = state->crtc ? state->crtc : plane->state->crtc; - if (!crtc) - return 0; - max_width = config->hw->lm.max_width << 16; max_height = config->hw->lm.max_height << 16; @@ -303,10 +298,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return -ERANGE; } - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); - if (WARN_ON(!crtc_state)) - return -EINVAL; - clip.x1 = 0; clip.y1 = 0; clip.x2 = crtc_state->adjusted_mode.hdisplay; @@ -382,6 +373,23 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return 0; } +static int mdp5_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + crtc = state->crtc ? state->crtc : plane->state->crtc; + if (!crtc) + return 0; + + crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + return mdp5_plane_atomic_check_with_state(crtc_state, state); +} + static void mdp5_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { -- cgit v1.2.3-55-g7522 From 10967a0687b13a27d77b4a77d97010359cb8267f Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Fri, 16 Dec 2016 11:59:57 +0530 Subject: drm/msm/mdp5: Add support for legacy cursor updates This code has been more or less picked up from the vc4 and intel implementations of update_plane() funcs for cursor planes. The update_plane() func is usually the drm_atomic_helper_update_plane func that will issue an atomic commit with the plane updates. Such commits are not intended to be done faster than the vsync rate. The legacy cursor userspace API, on the other hand, expects the kernel to handle cursor updates immediately. 
Create a fast path in update_plane, which updates the cursor registers and flushes the configuration. The fast path is taken when there is only a change in the cursor's position in the crtc, or a change in the cursor's crop co-ordinates. For anything else, we go via the slow path. We take the slow path even when the fb changes, and when there is currently no fb tied to the plane. This should hopefully ensure that we always take a slow path for every new fb. This in turn should ensure that the fb is pinned/prepared. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 7 ++ drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 1 + drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 110 +++++++++++++++++++++++++++++- 3 files changed, 115 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 84fcb6e3a176..d0c8b38b96ce 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -778,6 +778,13 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, mdp5_ctl_set_pipeline(ctl, intf, lm); } +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + + return mdp5_crtc->ctl; +} + int mdp5_crtc_get_lm(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 0396e8adb693..9de471191eba 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -246,6 +246,7 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, enum drm_plane_type type); +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); int mdp5_crtc_get_lm(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 504201b710d9..0ffb8affef35 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -31,6 +31,14 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_rect *src, struct drm_rect *dest); +static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); + static void set_scanout_locked(struct drm_plane *plane, struct drm_framebuffer *fb); @@ -243,6 +251,19 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { .atomic_print_state = mdp5_plane_atomic_print_state, }; +static const struct drm_plane_funcs mdp5_cursor_plane_funcs = { + .update_plane = mdp5_update_cursor_plane_legacy, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = mdp5_plane_destroy, + .set_property = drm_atomic_helper_plane_set_property, + .atomic_set_property = mdp5_plane_atomic_set_property, + .atomic_get_property = mdp5_plane_atomic_get_property, + .reset = mdp5_plane_reset, + .atomic_duplicate_state = mdp5_plane_duplicate_state, + .atomic_destroy_state = mdp5_plane_destroy_state, + .atomic_print_state = mdp5_plane_atomic_print_state, +}; + static int mdp5_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { @@ -860,6 +881,82 @@ static int 
mdp5_plane_mode_set(struct drm_plane *plane, return ret; } +static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_plane_state *plane_state, *new_plane_state; + struct mdp5_plane_state *mdp5_pstate; + struct drm_crtc_state *crtc_state = crtc->state; + int ret; + + if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state)) + goto slow; + + plane_state = plane->state; + mdp5_pstate = to_mdp5_plane_state(plane_state); + + /* don't use fast path if we don't have a hwpipe allocated yet */ + if (!mdp5_pstate->hwpipe) + goto slow; + + /* only allow changing of position(crtc x/y or src x/y) in fast path */ + if (plane_state->crtc != crtc || + plane_state->src_w != src_w || + plane_state->src_h != src_h || + plane_state->crtc_w != crtc_w || + plane_state->crtc_h != crtc_h || + !plane_state->fb || + plane_state->fb != fb) + goto slow; + + new_plane_state = mdp5_plane_duplicate_state(plane); + if (!new_plane_state) + return -ENOMEM; + + new_plane_state->src_x = src_x; + new_plane_state->src_y = src_y; + new_plane_state->src_w = src_w; + new_plane_state->src_h = src_h; + new_plane_state->crtc_x = crtc_x; + new_plane_state->crtc_y = crtc_y; + new_plane_state->crtc_w = crtc_w; + new_plane_state->crtc_h = crtc_h; + + ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state); + if (ret) + goto slow_free; + + if (new_plane_state->visible) { + struct mdp5_ctl *ctl; + + ret = mdp5_plane_mode_set(plane, crtc, fb, + &new_plane_state->src, + &new_plane_state->dst); + WARN_ON(ret < 0); + + ctl = mdp5_crtc_get_ctl(crtc); + + mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane)); + } + + *to_mdp5_plane_state(plane_state) = + *to_mdp5_plane_state(new_plane_state); + + mdp5_plane_destroy_state(plane, new_plane_state); + + return 0; +slow_free: + mdp5_plane_destroy_state(plane, new_plane_state); +slow: + return drm_atomic_helper_update_plane(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); +} + enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) { struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); @@ -899,9 +996,16 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, ARRAY_SIZE(mdp5_plane->formats), false); - ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, - mdp5_plane->formats, mdp5_plane->nformats, - type, NULL); + if (type == DRM_PLANE_TYPE_CURSOR) + ret = drm_universal_plane_init(dev, plane, 0xff, + &mdp5_cursor_plane_funcs, + mdp5_plane->formats, mdp5_plane->nformats, + type, NULL); + else + ret = drm_universal_plane_init(dev, plane, 0xff, + &mdp5_plane_funcs, + mdp5_plane->formats, mdp5_plane->nformats, + type, NULL); if (ret) goto fail; -- cgit v1.2.3-55-g7522 From a1b1a4f7e4c3cf3352ae477b86ddea2d4ed35d3e Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 4 Jan 2017 14:14:58 +0530 Subject: drm/msm/dsi: Don't error if a DSI host doesn't have a device connected The driver returns an error if a DSI DT node is populated, but no device is connected to it or if the data-lane map isn't present. Ideally, such a DSI node shouldn't be probed at all (i.e, its status should be set to "disabled in DT"), but there isn't any harm in registering the DSI device even if it doesn't have a bridge/panel connected to it. 
Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_host.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index eb0903d37e5c..c4dad9084a3a 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1559,8 +1559,9 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, prop = of_find_property(ep, "data-lanes", &len); if (!prop) { - dev_dbg(dev, "failed to find data lane mapping\n"); - return -EINVAL; + dev_dbg(dev, + "failed to find data lane mapping, using default\n"); + return 0; } num_lanes = len / sizeof(u32); @@ -1617,7 +1618,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) struct device *dev = &msm_host->pdev->dev; struct device_node *np = dev->of_node; struct device_node *endpoint, *device_node; - int ret; + int ret = 0; /* * Get the endpoint of the output port of the DSI host. In our case, @@ -1641,8 +1642,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) /* Get panel node from the output port's endpoint data */ device_node = of_graph_get_remote_port_parent(endpoint); if (!device_node) { - dev_err(dev, "%s: no valid device\n", __func__); - ret = -ENODEV; + dev_dbg(dev, "%s: no valid device\n", __func__); goto err; } -- cgit v1.2.3-55-g7522 From 3a3ff88a0fc18ad45f0f5818a4f3de395da68d47 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 14 Sep 2016 12:23:07 +0530 Subject: drm/msm/dsi: Add 8x96 info in dsi_cfg Add 8x96 DSI data in dsi_cfg. The downstream kernel's dsi_host driver enables core_mmss_clk. We're seeing some branch clock warnings on 8x96 when enabling this. There doesn't seem to be any negative effect with not enabling this clock, so only use it once we figure out why we get the warnings. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 25 +++++++++++++++++++++++++ drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1 + 2 files changed, 26 insertions(+) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 63436d8ee470..a5d75c9b3a73 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { .num_dsi = 2, }; +/* + * TODO: core_mmss_clk fails to enable for some reason, but things work fine + * without it too.
Figure out why it doesn't enable and uncomment below + */ +static const char * const dsi_8996_bus_clk_names[] = { + "mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */ +}; + +static const struct msm_dsi_config msm8996_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 2, + .regs = { + {"vdda", 18160, 1 }, /* 1.25 V */ + {"vcca", 17000, 32 }, /* 0.925 V */ + {"vddio", 100000, 100 },/* 1.8 V */ + }, + }, + .bus_clk_names = dsi_8996_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names), + .io_start = { 0x994000, 0x996000 }, + .num_dsi = 2, +}; + static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, @@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8974_apq8084_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg}, }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index eeacc3232494..00a5da2663c6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -24,6 +24,7 @@ #define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 +#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 #define MSM_DSI_V2_VER_MINOR_8064 0x0 -- cgit v1.2.3-55-g7522 From 25c45d897016bd2c92820abfd59c800aaec7d62b Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 14 Sep 2016 12:23:59 +0530 Subject: drm/msm/dsi: Add a PHY op that initializes version specific stuff Create an init() op for dsi_phy which sets up things specific to a given DSI PHY. The dsi_phy driver probe expects every DSI version to get a "dsi_phy_regulator" mmio base. This isn't the case for 8x96. Creating an init() op will allow us to accommodate such differences. 
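As an illustration of what a per-version init() hook makes possible, a PHY generation that has no "dsi_phy_regulator" region could map a different resource instead. The helper and resource names below are assumptions (the lane_base field and the real 8x96 init arrive in a later patch of this series); only msm_ioremap() is reused from this driver.

static int example_phy_v2_init(struct msm_dsi_phy *phy)
{
        struct platform_device *pdev = phy->pdev;

        /* map a generation-specific region instead of the regulator base */
        phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", "DSI_PHY_LANE");
        if (IS_ERR(phy->lane_base)) {
                dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
                        __func__);
                return -ENOMEM;
        }

        return 0;
}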
Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 33 ++++++++++++++++++------- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 2 ++ drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 1 + drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 2 ++ drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 1 + 5 files changed, 30 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index f39386ed75e4..03a354933606 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -295,6 +295,24 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy) return -EINVAL; } +int msm_dsi_phy_init_common(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + int ret = 0; + + phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", + "DSI_PHY_REG"); + if (IS_ERR(phy->reg_base)) { + dev_err(&pdev->dev, "%s: failed to map phy regulator base\n", + __func__); + ret = -ENOMEM; + goto fail; + } + +fail: + return ret; +} + static int dsi_phy_driver_probe(struct platform_device *pdev) { struct msm_dsi_phy *phy; @@ -331,15 +349,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) goto fail; } - phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", - "DSI_PHY_REG"); - if (IS_ERR(phy->reg_base)) { - dev_err(dev, "%s: failed to map phy regulator base\n", - __func__); - ret = -ENOMEM; - goto fail; - } - ret = dsi_phy_regulator_init(phy); if (ret) { dev_err(dev, "%s: failed to init regulator\n", __func__); @@ -353,6 +362,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) goto fail; } + if (phy->cfg->ops.init) { + ret = phy->cfg->ops.init(phy); + if (ret) + goto fail; + } + /* PLL init will call into clk_register which requires * register access, so we need to enable power and ahb clock. 
*/ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index f24a85439b94..b9d7d02260da 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -22,6 +22,7 @@ #define dsi_phy_write(offset, data) msm_writel((data), (offset)) struct msm_dsi_phy_ops { + int (*init) (struct msm_dsi_phy *phy); int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, const unsigned long bit_rate, const unsigned long esc_rate); void (*disable)(struct msm_dsi_phy *phy); @@ -87,6 +88,7 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, const unsigned long bit_rate, const unsigned long esc_rate); void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask); +int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); #endif /* __DSI_PHY_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index c757e2070cac..c4a7be5dee60 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -145,6 +145,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .ops = { .enable = dsi_20nm_phy_enable, .disable = dsi_20nm_phy_disable, + .init = msm_dsi_phy_init_common, }, .io_start = { 0xfd998300, 0xfd9a0300 }, .num_dsi_phy = 2, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 63d7fba31380..ea740c5fb235 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -144,6 +144,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, + .init = msm_dsi_phy_init_common, }, .io_start = { 0xfd922b00, 0xfd923100 }, .num_dsi_phy = 2, @@ -161,6 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, + .init = msm_dsi_phy_init_common, }, .io_start = { 0x1a98500 }, .num_dsi_phy = 1, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 7bdb9de54968..9aff0ba069cc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -191,6 +191,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, + .init = msm_dsi_phy_init_common, }, .io_start = { 0x4700300, 0x5800300 }, .num_dsi_phy = 2, -- cgit v1.2.3-55-g7522 From dceac340155b66b6c97cb802b03d4778dd82e9be Mon Sep 17 00:00:00 2001 From: Hai Li Date: Thu, 15 Sep 2016 14:34:49 +0530 Subject: drm/msm/dsi: Return more timings from PHY to host The DSI host is required to configure more timings calculated in PHY. By introducing a shared structure, this change allows more timing information passed from PHY to host. 
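A rough sketch of the consumer side: the host copies the shared values and programs its clock-lane timing registers from them. Register and field names are taken from the hunks below; the wrapper function itself is hypothetical.

static void example_apply_shared_timings(struct msm_dsi_host *msm_host,
                                         struct msm_dsi_phy *phy)
{
        struct msm_dsi_phy_shared_timings st;

        msm_dsi_phy_get_shared_timings(phy, &st);

        /* clk_pre/clk_post feed DSI_CLKOUT_TIMING_CTRL ... */
        dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL,
                  DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(st.clk_post) |
                  DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(st.clk_pre));

        /* ... and clk_pre_inc_by_2 asks newer DSI 6G controllers to extend
         * T_CLK_PRE in increments of 2 byte clocks.
         */
        if (st.clk_pre_inc_by_2)
                dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
                          DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
}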
Signed-off-by: Hai Li Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.h | 13 ++++++++++--- drivers/gpu/drm/msm/dsi/dsi_host.c | 20 +++++++++++++------- drivers/gpu/drm/msm/dsi/dsi_manager.c | 4 ++-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 29 ++++++++++++++--------------- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 2 ++ 5 files changed, 41 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 81971b3caf3b..f5e4ccfc902d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -27,6 +27,8 @@ #define DSI_1 1 #define DSI_MAX 2 +struct msm_dsi_phy_shared_timings; + enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_HPM, MSM_DSI_PHY_28NM_LP, @@ -86,7 +88,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id); struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); int msm_dsi_manager_phy_enable(int id, const unsigned long bit_rate, const unsigned long esc_rate, - u32 *clk_pre, u32 *clk_post); + struct msm_dsi_phy_shared_timings *shared_timing); void msm_dsi_manager_phy_disable(int id); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); @@ -165,13 +167,18 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi); /* dsi phy */ struct msm_dsi_phy; +struct msm_dsi_phy_shared_timings { + u32 clk_post; + u32 clk_pre; + bool clk_pre_inc_by_2; +}; void msm_dsi_phy_driver_register(void); void msm_dsi_phy_driver_unregister(void); int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, const unsigned long bit_rate, const unsigned long esc_rate); void msm_dsi_phy_disable(struct msm_dsi_phy *phy); -void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, - u32 *clk_pre, u32 *clk_post); +void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, + struct msm_dsi_phy_shared_timings *shared_timing); struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); #endif /* __DSI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index c4dad9084a3a..8e75e001ad02 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -756,7 +756,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( } static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, - u32 clk_pre, u32 clk_post) + struct msm_dsi_phy_shared_timings *phy_shared_timings) { u32 flags = msm_host->mode_flags; enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; @@ -819,10 +819,16 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); - data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) | - DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre); + data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) | + DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre); dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) && + phy_shared_timings->clk_pre_inc_by_2) + dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND, + DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK); + data = 0; if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; @@ -2170,7 +2176,7 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable) int msm_dsi_host_power_on(struct mipi_dsi_host *host) { 
struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - u32 clk_pre = 0, clk_post = 0; + struct msm_dsi_phy_shared_timings phy_shared_timings; int ret = 0; mutex_lock(&msm_host->dev_mutex); @@ -2204,7 +2210,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) ret = msm_dsi_manager_phy_enable(msm_host->id, msm_host->byte_clk_rate * 8, msm_host->esc_clk_rate, - &clk_pre, &clk_post); + &phy_shared_timings); dsi_bus_clk_disable(msm_host); if (ret) { pr_err("%s: failed to enable phy, %d\n", __func__, ret); @@ -2226,7 +2232,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) dsi_timing_setup(msm_host); dsi_sw_reset(msm_host); - dsi_ctrl_config(msm_host, true, clk_pre, clk_post); + dsi_ctrl_config(msm_host, true, &phy_shared_timings); if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 1); @@ -2255,7 +2261,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) goto unlock_ret; } - dsi_ctrl_config(msm_host, false, 0, 0); + dsi_ctrl_config(msm_host, false, NULL); if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 0); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index bd4ba00d2524..423afa66bd0f 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -660,7 +660,7 @@ void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) int msm_dsi_manager_phy_enable(int id, const unsigned long bit_rate, const unsigned long esc_rate, - u32 *clk_pre, u32 *clk_post) + struct msm_dsi_phy_shared_timings *shared_timings) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi_phy *phy = msm_dsi->phy; @@ -688,7 +688,7 @@ int msm_dsi_manager_phy_enable(int id, } msm_dsi->phy_enabled = true; - msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post); + msm_dsi_phy_get_shared_timings(phy, shared_timings); return 0; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 03a354933606..51f7c66078d7 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -115,8 +115,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; temp = 60 * coeff + 52 * ui - 24 * ui - temp; tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; - timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false); - + timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0, + false); tmax = 63; temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; @@ -124,17 +124,21 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; if (tmin > tmax) { temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); - timing->clk_pre = temp >> 1; + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = true; } else { - timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = false; } timing->ta_go = 3; timing->ta_sure = 0; timing->ta_get = 4; - DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", - timing->clk_pre, timing->clk_post, timing->clk_zero, + DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit, timing->hs_zero, timing->hs_prepare, 
timing->hs_trail, timing->hs_rqst); @@ -460,16 +464,11 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) dsi_phy_regulator_disable(phy); } -void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, - u32 *clk_pre, u32 *clk_post) +void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, + struct msm_dsi_phy_shared_timings *shared_timings) { - if (!phy) - return; - - if (clk_pre) - *clk_pre = phy->timing.clk_pre; - if (clk_post) - *clk_post = phy->timing.clk_post; + memcpy(shared_timings, &phy->timing.shared_timings, + sizeof(*shared_timings)); } struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index b9d7d02260da..7399934cec54 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -62,6 +62,8 @@ struct msm_dsi_dphy_timing { u32 ta_go; u32 ta_sure; u32 ta_get; + + struct msm_dsi_phy_shared_timings shared_timings; }; struct msm_dsi_phy { -- cgit v1.2.3-55-g7522 From 57bf433893370c069a0c34842f35a3bb8aa130fc Mon Sep 17 00:00:00 2001 From: Hai Li Date: Thu, 15 Sep 2016 14:44:22 +0530 Subject: drm/msm/dsi: Pass down use case to PHY For some new types of DSI PHY, more settings depend on use cases controlled by DSI manager. This change allows DSI manager to setup PHY with a use case. Signed-off-by: Hai Li Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.h | 8 +++++++ drivers/gpu/drm/msm/dsi/dsi_manager.c | 43 ++++++++++++----------------------- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 29 ++++++++++++++++++++++- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1 + 4 files changed, 51 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index f5e4ccfc902d..d516fe296b78 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -37,6 +37,12 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_MAX }; +enum msm_dsi_phy_usecase { + MSM_DSI_PHY_STANDALONE, + MSM_DSI_PHY_MASTER, + MSM_DSI_PHY_SLAVE, +}; + #define DSI_DEV_REGULATOR_MAX 8 #define DSI_BUS_CLK_MAX 4 @@ -180,6 +186,8 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy); void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, struct msm_dsi_phy_shared_timings *shared_timing); struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); +void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, + enum msm_dsi_phy_usecase uc); #endif /* __DSI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 423afa66bd0f..c040830f5900 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -72,11 +72,12 @@ static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id) return 0; } -static int dsi_mgr_host_register(int id) +static int dsi_mgr_setup_components(int id) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); struct msm_dsi_pll *src_pll; int ret; @@ -85,15 +86,16 @@ static int dsi_mgr_host_register(int id) if (ret) return ret; + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); } else if (!other_dsi) { ret = 0; } else { - struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ? 
- msm_dsi : other_dsi; - struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ? - other_dsi : msm_dsi; + struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? + msm_dsi : other_dsi; + struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? + other_dsi : msm_dsi; /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master * DSI device's probe. @@ -101,14 +103,18 @@ static int dsi_mgr_host_register(int id) * because only master DSI device adds the panel to global * panel list. The panel's device is the master DSI device. */ - ret = msm_dsi_host_register(sdsi->host, false); + ret = msm_dsi_host_register(slave_link_dsi->host, false); if (ret) return ret; - ret = msm_dsi_host_register(mdsi->host, true); + ret = msm_dsi_host_register(master_link_dsi->host, true); if (ret) return ret; /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); if (ret) @@ -665,28 +671,12 @@ int msm_dsi_manager_phy_enable(int id, struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi_phy *phy = msm_dsi->phy; int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; - struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); int ret; ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate); if (ret) return ret; - /* - * Reset DSI PHY silently changes its PLL registers to reset status, - * which will confuse clock driver and result in wrong output rate of - * link clocks. Restore PLL status if its PLL is being used as clock - * source. - */ - if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) { - ret = msm_dsi_pll_restore_state(pll); - if (ret) { - pr_err("%s: failed to restore pll state\n", __func__); - msm_dsi_phy_disable(phy); - return ret; - } - } - msm_dsi->phy_enabled = true; msm_dsi_phy_get_shared_timings(phy, shared_timings); @@ -699,11 +689,6 @@ void msm_dsi_manager_phy_disable(int id) struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); struct msm_dsi_phy *phy = msm_dsi->phy; - struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); - - /* Save PLL status if it is a clock source */ - if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) - msm_dsi_pll_save_state(pll); /* disable DSI phy * In dual-dsi configuration, the phy should be disabled for the @@ -834,7 +819,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) goto fail; } - ret = dsi_mgr_host_register(id); + ret = dsi_mgr_setup_components(id); if (ret) { pr_err("%s: failed to register mipi dsi host for DSI %d\n", __func__, id); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 51f7c66078d7..1021c1c572ca 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -451,7 +451,24 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, return ret; } - return 0; + /* + * Resetting DSI PHY silently changes its PLL registers to reset status, + * which will confuse clock driver and result in wrong output rate of + * link clocks. Restore PLL status if its PLL is being used as clock + * source. 
+ */ + if (phy->usecase != MSM_DSI_PHY_SLAVE) { + ret = msm_dsi_pll_restore_state(phy->pll); + if (ret) { + pr_err("%s: failed to restore pll state\n", __func__); + if (phy->cfg->ops.disable) + phy->cfg->ops.disable(phy); + dsi_phy_regulator_disable(phy); + return ret; + } + } + + return ret; } void msm_dsi_phy_disable(struct msm_dsi_phy *phy) @@ -459,6 +476,10 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) if (!phy || !phy->cfg->ops.disable) return; + /* Save PLL status if it is a clock source */ + if (phy->usecase != MSM_DSI_PHY_SLAVE) + msm_dsi_pll_save_state(phy->pll); + phy->cfg->ops.disable(phy); dsi_phy_regulator_disable(phy); @@ -479,3 +500,9 @@ struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) return phy->pll; } +void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, + enum msm_dsi_phy_usecase uc) +{ + if (phy) + phy->usecase = uc; +} diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 7399934cec54..e1057423fd21 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -78,6 +78,7 @@ struct msm_dsi_phy { struct msm_dsi_dphy_timing timing; const struct msm_dsi_phy_cfg *cfg; + enum msm_dsi_phy_usecase usecase; bool regulator_ldo_mode; struct msm_dsi_pll *pll; -- cgit v1.2.3-55-g7522 From 34d9545b9f769c6553e31a6820c9cb51f5e93099 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 29 Jul 2015 12:14:12 -0400 Subject: drm/msm/dsi: Reset both PHYs before clock operation for dual DSI In case of dual DSI, some registers in PHY1 have been programmed during PLL0 clock's set_rate. The PHY1 reset called by host1 later will silently reset those PHY1 registers. This change is to reset and enable both PHYs before any PLL clock operation. [Originally worked on by Hai Li . 
Fixed up by Archit Taneja ] Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.h | 1 + drivers/gpu/drm/msm/dsi/dsi_host.c | 25 +++++++++++++------------ drivers/gpu/drm/msm/dsi/dsi_manager.c | 32 +++++++++++++++++++++++++++++--- 3 files changed, 43 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index d516fe296b78..9407a682d045 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -166,6 +166,7 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); void msm_dsi_host_unregister(struct mipi_dsi_host *host); int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, struct msm_dsi_pll *src_pll); +void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); void msm_dsi_host_destroy(struct mipi_dsi_host *host); int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, struct drm_device *dev); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 8e75e001ad02..1d2c9b46737d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -691,17 +691,6 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) return 0; } -static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host) -{ - DBG(""); - dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); - /* Make sure fully reset */ - wmb(); - udelay(1000); - dsi_write(msm_host, REG_DSI_PHY_RESET, 0); - udelay(100); -} - static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) { u32 intr; @@ -2126,6 +2115,19 @@ exit: return ret; } +void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + DBG(""); + dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); + /* Make sure fully reset */ + wmb(); + udelay(1000); + dsi_write(msm_host, REG_DSI_PHY_RESET, 0); + udelay(100); +} + int msm_dsi_host_enable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); @@ -2206,7 +2208,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) goto fail_disable_reg; } - dsi_phy_sw_reset(msm_host); ret = msm_dsi_manager_phy_enable(msm_host->id, msm_host->byte_clk_rate * 8, msm_host->esc_clk_rate, diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index c040830f5900..cb67e78d775b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -669,13 +669,39 @@ int msm_dsi_manager_phy_enable(int id, struct msm_dsi_phy_shared_timings *shared_timings) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); struct msm_dsi_phy *phy = msm_dsi->phy; int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; int ret; - ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate); - if (ret) - return ret; + /* In case of dual DSI, some registers in PHY1 have been programmed + * during PLL0 clock's set_rate. The PHY1 reset called by host1 here + * will silently reset those PHY1 registers. Therefore we need to reset + * and enable both PHYs before any PLL clock operation. 
+ */ + if (IS_DUAL_DSI() && mdsi && sdsi) { + if (!mdsi->phy_enabled && !sdsi->phy_enabled) { + msm_dsi_host_reset_phy(mdsi->host); + msm_dsi_host_reset_phy(sdsi->host); + ret = msm_dsi_phy_enable(mdsi->phy, src_pll_id, + bit_rate, esc_rate); + if (ret) + return ret; + ret = msm_dsi_phy_enable(sdsi->phy, src_pll_id, + bit_rate, esc_rate); + if (ret) { + msm_dsi_phy_disable(mdsi->phy); + return ret; + } + } + } else { + msm_dsi_host_reset_phy(msm_dsi->host); + ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, bit_rate, + esc_rate); + if (ret) + return ret; + } msm_dsi->phy_enabled = true; msm_dsi_phy_get_shared_timings(phy, shared_timings); -- cgit v1.2.3-55-g7522 From b62aa70a98c5401ada63657eee7c88da70bdbb27 Mon Sep 17 00:00:00 2001 From: Hai Li Date: Sat, 7 Jan 2017 14:24:38 +0530 Subject: drm/msm/dsi: Move PHY operations out of host Since DSI PHY has been a separate platform device, it should not depend on the resources in host to be functional. This change is to trigger PHY operations in manager, instead of host, so that host and PHY can be completely separated. Signed-off-by: Hai Li Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.h | 18 ++- drivers/gpu/drm/msm/dsi/dsi_host.c | 46 +++--- drivers/gpu/drm/msm/dsi/dsi_manager.c | 178 ++++++++++++++---------- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 39 ++++-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 4 +- drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 4 +- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 4 +- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 4 +- 8 files changed, 172 insertions(+), 125 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 9407a682d045..9465996d0869 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -28,6 +28,7 @@ #define DSI_MAX 2 struct msm_dsi_phy_shared_timings; +struct msm_dsi_phy_clk_request; enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_HPM, @@ -92,10 +93,6 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); struct drm_connector *msm_dsi_manager_connector_init(u8 id); struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); -int msm_dsi_manager_phy_enable(int id, - const unsigned long bit_rate, const unsigned long esc_rate, - struct msm_dsi_phy_shared_timings *shared_timing); -void msm_dsi_manager_phy_disable(int id); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags); @@ -155,7 +152,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base, u32 len); int msm_dsi_host_enable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host); -int msm_dsi_host_power_on(struct mipi_dsi_host *host); +int msm_dsi_host_power_on(struct mipi_dsi_host *host, + struct msm_dsi_phy_shared_timings *phy_shared_timings); int msm_dsi_host_power_off(struct mipi_dsi_host *host); int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode); @@ -167,6 +165,8 @@ void msm_dsi_host_unregister(struct mipi_dsi_host *host); int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, struct msm_dsi_pll *src_pll); void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); +void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_host_destroy(struct 
mipi_dsi_host *host); int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, struct drm_device *dev); @@ -179,10 +179,16 @@ struct msm_dsi_phy_shared_timings { u32 clk_pre; bool clk_pre_inc_by_2; }; + +struct msm_dsi_phy_clk_request { + unsigned long bitclk_rate; + unsigned long escclk_rate; +}; + void msm_dsi_phy_driver_register(void); void msm_dsi_phy_driver_unregister(void); int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate); + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_phy_disable(struct msm_dsi_phy *phy); void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, struct msm_dsi_phy_shared_timings *shared_timing); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 1d2c9b46737d..1fc07ce24686 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -2128,6 +2128,15 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) udelay(100); } +void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; + clk_req->escclk_rate = msm_host->esc_clk_rate; +} + int msm_dsi_host_enable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); @@ -2175,10 +2184,10 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable) SFPB_GPREG_MASTER_PORT_EN(en)); } -int msm_dsi_host_power_on(struct mipi_dsi_host *host) +int msm_dsi_host_power_on(struct mipi_dsi_host *host, + struct msm_dsi_phy_shared_timings *phy_shared_timings) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - struct msm_dsi_phy_shared_timings phy_shared_timings; int ret = 0; mutex_lock(&msm_host->dev_mutex); @@ -2189,12 +2198,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) msm_dsi_sfpb_config(msm_host, true); - ret = dsi_calc_clk_rate(msm_host); - if (ret) { - pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); - goto unlock_ret; - } - ret = dsi_host_regulator_enable(msm_host); if (ret) { pr_err("%s:Failed to enable vregs.ret=%d\n", @@ -2202,22 +2205,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) goto unlock_ret; } - ret = dsi_bus_clk_enable(msm_host); - if (ret) { - pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret); - goto fail_disable_reg; - } - - ret = msm_dsi_manager_phy_enable(msm_host->id, - msm_host->byte_clk_rate * 8, - msm_host->esc_clk_rate, - &phy_shared_timings); - dsi_bus_clk_disable(msm_host); - if (ret) { - pr_err("%s: failed to enable phy, %d\n", __func__, ret); - goto fail_disable_reg; - } - ret = dsi_clk_ctrl(msm_host, 1); if (ret) { pr_err("%s: failed to enable clocks. 
ret=%d\n", __func__, ret); @@ -2233,7 +2220,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) dsi_timing_setup(msm_host); dsi_sw_reset(msm_host); - dsi_ctrl_config(msm_host, true, &phy_shared_timings); + dsi_ctrl_config(msm_host, true, phy_shared_timings); if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 1); @@ -2269,8 +2256,6 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); - msm_dsi_manager_phy_disable(msm_host->id); - dsi_clk_ctrl(msm_host, 0); dsi_host_regulator_disable(msm_host); @@ -2290,6 +2275,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; if (msm_host->mode) { drm_mode_destroy(msm_host->dev, msm_host->mode); @@ -2302,6 +2288,12 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, return -ENOMEM; } + ret = dsi_calc_clk_rate(msm_host); + if (ret) { + pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index cb67e78d775b..921270ea6059 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -125,6 +125,84 @@ static int dsi_mgr_setup_components(int id) return ret; } +static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id, + struct msm_dsi_phy_shared_timings *shared_timings) +{ + struct msm_dsi_phy_clk_request clk_req; + int ret; + + msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req); + + ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req); + msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings); + + return ret; +} + +static int +dsi_mgr_phy_enable(int id, + struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX]) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; + int ret; + + /* In case of dual DSI, some registers in PHY1 have been programmed + * during PLL0 clock's set_rate. The PHY1 reset called by host1 here + * will silently reset those PHY1 registers. Therefore we need to reset + * and enable both PHYs before any PLL clock operation. + */ + if (IS_DUAL_DSI() && mdsi && sdsi) { + if (!mdsi->phy_enabled && !sdsi->phy_enabled) { + msm_dsi_host_reset_phy(mdsi->host); + msm_dsi_host_reset_phy(sdsi->host); + + ret = enable_phy(mdsi, src_pll_id, + &shared_timings[DSI_CLOCK_MASTER]); + if (ret) + return ret; + ret = enable_phy(sdsi, src_pll_id, + &shared_timings[DSI_CLOCK_SLAVE]); + if (ret) { + msm_dsi_phy_disable(mdsi->phy); + return ret; + } + } + } else { + msm_dsi_host_reset_phy(mdsi->host); + ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); + if (ret) + return ret; + } + + msm_dsi->phy_enabled = true; + + return 0; +} + +static void dsi_mgr_phy_disable(int id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + + /* disable DSI phy + * In dual-dsi configuration, the phy should be disabled for the + * first controller only when the second controller is disabled. 
+ */ + msm_dsi->phy_enabled = false; + if (IS_DUAL_DSI() && mdsi && sdsi) { + if (!mdsi->phy_enabled && !sdsi->phy_enabled) { + msm_dsi_phy_disable(sdsi->phy); + msm_dsi_phy_disable(mdsi->phy); + } + } else { + msm_dsi_phy_disable(msm_dsi->phy); + } +} + struct dsi_connector { struct drm_connector base; int id; @@ -360,22 +438,31 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; + struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX]; bool is_dual_dsi = IS_DUAL_DSI(); int ret; DBG("id=%d", id); - if (!msm_dsi_device_connected(msm_dsi) || - (is_dual_dsi && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi)) return; - ret = msm_dsi_host_power_on(host); + ret = dsi_mgr_phy_enable(id, phy_shared_timings); + if (ret) + goto phy_en_fail; + + /* Do nothing with the host if it is DSI 1 in case of dual DSI */ + if (is_dual_dsi && (DSI_1 == id)) + return; + + ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]); if (ret) { pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); goto host_on_fail; } if (is_dual_dsi && msm_dsi1) { - ret = msm_dsi_host_power_on(msm_dsi1->host); + ret = msm_dsi_host_power_on(msm_dsi1->host, + &phy_shared_timings[DSI_1]); if (ret) { pr_err("%s: power on host1 failed, %d\n", __func__, ret); @@ -434,6 +521,8 @@ panel_prep_fail: host1_on_fail: msm_dsi_host_power_off(host); host_on_fail: + dsi_mgr_phy_disable(id); +phy_en_fail: return; } @@ -459,10 +548,17 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) DBG("id=%d", id); - if (!msm_dsi_device_connected(msm_dsi) || - (is_dual_dsi && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi)) return; + /* + * Do nothing with the host if it is DSI 1 in case of dual DSI. + * It is safe to call dsi_mgr_phy_disable() here because a single PHY + * won't be diabled until both PHYs request disable. + */ + if (is_dual_dsi && (DSI_1 == id)) + goto disable_phy; + if (panel) { ret = drm_panel_disable(panel); if (ret) @@ -497,6 +593,9 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) pr_err("%s: host1 power off failed, %d\n", __func__, ret); } + +disable_phy: + dsi_mgr_phy_disable(id); } static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, @@ -664,73 +763,6 @@ void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) { } -int msm_dsi_manager_phy_enable(int id, - const unsigned long bit_rate, const unsigned long esc_rate, - struct msm_dsi_phy_shared_timings *shared_timings) -{ - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); - struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); - struct msm_dsi_phy *phy = msm_dsi->phy; - int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; - int ret; - - /* In case of dual DSI, some registers in PHY1 have been programmed - * during PLL0 clock's set_rate. The PHY1 reset called by host1 here - * will silently reset those PHY1 registers. Therefore we need to reset - * and enable both PHYs before any PLL clock operation. 
- */ - if (IS_DUAL_DSI() && mdsi && sdsi) { - if (!mdsi->phy_enabled && !sdsi->phy_enabled) { - msm_dsi_host_reset_phy(mdsi->host); - msm_dsi_host_reset_phy(sdsi->host); - ret = msm_dsi_phy_enable(mdsi->phy, src_pll_id, - bit_rate, esc_rate); - if (ret) - return ret; - ret = msm_dsi_phy_enable(sdsi->phy, src_pll_id, - bit_rate, esc_rate); - if (ret) { - msm_dsi_phy_disable(mdsi->phy); - return ret; - } - } - } else { - msm_dsi_host_reset_phy(msm_dsi->host); - ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, bit_rate, - esc_rate); - if (ret) - return ret; - } - - msm_dsi->phy_enabled = true; - msm_dsi_phy_get_shared_timings(phy, shared_timings); - - return 0; -} - -void msm_dsi_manager_phy_disable(int id) -{ - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); - struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); - struct msm_dsi_phy *phy = msm_dsi->phy; - - /* disable DSI phy - * In dual-dsi configuration, the phy should be disabled for the - * first controller only when the second controller is disabled. - */ - msm_dsi->phy_enabled = false; - if (IS_DUAL_DSI() && mdsi && sdsi) { - if (!mdsi->phy_enabled && !sdsi->phy_enabled) { - msm_dsi_phy_disable(sdsi->phy); - msm_dsi_phy_disable(mdsi->phy); - } - } else { - msm_dsi_phy_disable(phy); - } -} - int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 1021c1c572ca..a761531ec04c 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -54,8 +54,10 @@ static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing, } int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; s32 ui, lpx; s32 tmax, tmin; s32 pcnt0 = 10; @@ -429,7 +431,7 @@ void __exit msm_dsi_phy_driver_unregister(void) } int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { struct device *dev = &phy->pdev->dev; int ret; @@ -437,18 +439,24 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, if (!phy || !phy->cfg->ops.enable) return -EINVAL; + ret = dsi_phy_enable_resource(phy); + if (ret) { + dev_err(dev, "%s: resource enable failed, %d\n", + __func__, ret); + goto res_en_fail; + } + ret = dsi_phy_regulator_enable(phy); if (ret) { dev_err(dev, "%s: regulator enable failed, %d\n", __func__, ret); - return ret; + goto reg_en_fail; } - ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate); + ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req); if (ret) { dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret); - dsi_phy_regulator_disable(phy); - return ret; + goto phy_en_fail; } /* @@ -460,14 +468,22 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, if (phy->usecase != MSM_DSI_PHY_SLAVE) { ret = msm_dsi_pll_restore_state(phy->pll); if (ret) { - pr_err("%s: failed to restore pll state\n", __func__); - if (phy->cfg->ops.disable) - phy->cfg->ops.disable(phy); - dsi_phy_regulator_disable(phy); - return ret; + dev_err(dev, "%s: failed to restore pll state, %d\n", + __func__, ret); + goto pll_restor_fail; } } + return 0; + 
+pll_restor_fail: + if (phy->cfg->ops.disable) + phy->cfg->ops.disable(phy); +phy_en_fail: + dsi_phy_regulator_disable(phy); +reg_en_fail: + dsi_phy_disable_resource(phy); +res_en_fail: return ret; } @@ -483,6 +499,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) phy->cfg->ops.disable(phy); dsi_phy_regulator_disable(phy); + dsi_phy_disable_resource(phy); } void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index e1057423fd21..6472b600ac41 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -24,7 +24,7 @@ struct msm_dsi_phy_ops { int (*init) (struct msm_dsi_phy *phy); int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate); + struct msm_dsi_phy_clk_request *clk_req); void (*disable)(struct msm_dsi_phy *phy); }; @@ -88,7 +88,7 @@ struct msm_dsi_phy { * PHY internal functions */ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, - const unsigned long bit_rate, const unsigned long esc_rate); + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask); int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index c4a7be5dee60..1ca6c69516f5 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -72,7 +72,7 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) } static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_dphy_timing *timing = &phy->timing; int i; @@ -81,7 +81,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, DBG(""); - if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { dev_err(&phy->pdev->dev, "%s: D-PHY timing calculation failed\n", __func__); return -EINVAL; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index ea740c5fb235..4972b52cbe44 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -67,7 +67,7 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) } static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_dphy_timing *timing = &phy->timing; int i; @@ -75,7 +75,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, DBG(""); - if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { dev_err(&phy->pdev->dev, "%s: D-PHY timing calculation failed\n", __func__); return -EINVAL; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 9aff0ba069cc..398004463498 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -124,14 +124,14 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy) } static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) 
+ struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_dphy_timing *timing = &phy->timing; void __iomem *base = phy->base; DBG(""); - if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { dev_err(&phy->pdev->dev, "%s: D-PHY timing calculation failed\n", __func__); return -EINVAL; -- cgit v1.2.3-55-g7522 From a4df68fa232e979fb74b2efe6997d0f38cbfc626 Mon Sep 17 00:00:00 2001 From: Hai Li Date: Tue, 3 Jan 2017 19:31:16 +0530 Subject: drm/msm/dsi: Add new method to calculate 14nm PHY timings The 14nm DSI PHY on 8x96 (called PHY v2 downstream) requires a different set of calculations for computing D-PHY timing params. Create a timing_calc_v2 func for the newer v2 PHYs. Signed-off-by: Hai Li Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 117 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 11 +++- 2 files changed, 127 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index a761531ec04c..541d7dfa13c0 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -148,6 +148,123 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, return 0; } +int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8, lpx; + s32 tmax, tmin; + s32 pcnt0 = 50; + s32 pcnt1 = 50; + s32 pcnt2 = 10; + s32 pcnt3 = 30; + s32 pcnt4 = 10; + s32 pcnt5 = 2; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en, hb_en_ckln, pd_ckln, pd; + s32 val, val_ckln; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + timing->hs_halfbyte_en = 0; + hb_en = 0; + timing->hs_halfbyte_en_ckln = 0; + hb_en_ckln = 0; + timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3; + pd_ckln = timing->hs_prep_dly_ckln; + timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1; + pd = timing->hs_prep_dly; + + val = (hb_en << 2) + (pd << 1); + val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1); + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); + + temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff - val_ckln * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); + + temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui; + tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; + tmax = (tmin > 255) ? 
511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui - val * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); + + temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui; + tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; + timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); + + temp = 60 * coeff + 52 * ui - 43 * ui; + tmin = DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + timing->shared_timings.clk_post = + linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui; + temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui; + temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) : + (((timing->hs_rqst_ckln << 3) + 8) * ui); + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + if (tmin > tmax) { + temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = 1; + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = 0; + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, + timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, + timing->hs_prep_dly_ckln); + + return 0; +} + void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask) { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 6472b600ac41..feee87094ef5 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -64,6 +64,13 @@ struct msm_dsi_dphy_timing { u32 ta_get; struct msm_dsi_phy_shared_timings shared_timings; + + /* For PHY v2 only */ + u32 hs_rqst_ckln; + u32 hs_prep_dly; + u32 hs_prep_dly_ckln; + u8 hs_halfbyte_en; + u8 hs_halfbyte_en_ckln; }; struct msm_dsi_phy { @@ -88,7 +95,9 @@ struct msm_dsi_phy { * PHY internal functions */ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, - struct msm_dsi_phy_clk_request *clk_req); + struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_phy_set_src_pll(struct 
msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask); int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); -- cgit v1.2.3-55-g7522 From f079f6d999cbf857f899732de680f2b62f245b8c Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Tue, 3 Jan 2017 19:45:43 +0530 Subject: drm/msm/dsi: Add PHY/PLL for 8x96 Extend the DSI PHY/PLL drivers to support the DSI 14nm PHY/PLL found on 8x96. These are picked up from the downstream driver. The PHY part is similar to the other DSI PHYs. The PLL driver requires some trickery so that one DSI PLL can drive both the DSIs (i.e., dual DSI mode). In the case of dual DSI mode, one DSI instance becomes the clock master, and the other the clock slave. The master PLL's output (Byte and Pixel clock) is fed to both the DSI hosts/PHYs. When the DSIs are configured in dual DSI mode, the PHY driver communicates to the PLL driver using msm_dsi_pll_set_usecase() which instance is the master and which one is the slave. When setting rate, the master PLL also configures some of the slave PLL/PHY registers which need to be identical to the master's for correct dual DSI behaviour. There are 2 PLL post dividers that should have ideally been modelled as generic clk_divider clocks, but require some customization for dual DSI. In particular, when the master PLL's post-dividers are set, the slave PLL's post-dividers need to be set too. The clk_ops for these use clk_divider's helper ops and flags internally to prevent redundant code. Cc: Stephen Boyd Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/Kconfig | 7 + drivers/gpu/drm/msm/Makefile | 2 + drivers/gpu/drm/msm/dsi/dsi.h | 8 + drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 4 + drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 2 + drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 169 +++ drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 12 + drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 11 + drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c | 1104 ++++++++++++++++++++++++++++ 9 files changed, 1319 insertions(+) create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c create mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 7f78da695dff..5b8e23d051f2 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY help Choose this option if the 28nm DSI PHY 8960 variant is used on the platform. + +config DRM_MSM_DSI_14NM_PHY + bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)" + depends on DRM_MSM_DSI + default y + help + Choose this option if DSI PHY on 8996 is used on the platform.
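To make the clock master/slave wording of this patch concrete, a sketch of how the manager side is expected to tag each PLL: msm_dsi_pll_set_usecase() is declared in the dsi.h hunk below, the usecase enum comes from the earlier "Pass down use case to PHY" patch, and the wrapper function itself is hypothetical.

static void example_set_pll_usecases(struct msm_dsi_pll *master_pll,
                                     struct msm_dsi_pll *slave_pll)
{
        if (IS_DUAL_DSI()) {
                /* The master PLL feeds byte/pixel clocks of both hosts, and
                 * its set_rate also programs the slave PLL/PHY registers
                 * that have to match.
                 */
                msm_dsi_pll_set_usecase(master_pll, MSM_DSI_PHY_MASTER);
                msm_dsi_pll_set_usecase(slave_pll, MSM_DSI_PHY_SLAVE);
        } else {
                msm_dsi_pll_set_usecase(master_pll, MSM_DSI_PHY_STANDALONE);
        }
}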
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 028c24df2291..39055362da95 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o +msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o +msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 9465996d0869..32369975d155 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -35,6 +35,7 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_LP, MSM_DSI_PHY_20NM, MSM_DSI_PHY_28NM_8960, + MSM_DSI_PHY_14NM, MSM_DSI_PHY_MAX }; @@ -117,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, struct clk **byte_clk_provider, struct clk **pixel_clk_provider); void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); +int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc); #else static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { @@ -137,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) { return 0; } +static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + return -ENODEV; +} #endif /* dsi host */ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 541d7dfa13c0..0c2eb9c9a1fc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -390,6 +390,10 @@ static const struct of_device_id dsi_phy_dt_match[] = { #ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY { .compatible = "qcom,dsi-phy-28nm-8960", .data = &dsi_phy_28nm_8960_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY + { .compatible = "qcom,dsi-phy-14nm", + .data = &dsi_phy_14nm_cfgs }, #endif {} }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index feee87094ef5..1733f6608a09 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -47,6 +47,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; struct msm_dsi_dphy_timing { u32 clk_pre; @@ -77,6 +78,7 @@ struct msm_dsi_phy { struct platform_device *pdev; void __iomem *base; void __iomem *reg_base; + void __iomem *lane_base; int id; struct clk *ahb_clk; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c new file mode 100644 index 000000000000..513f4234adc1 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "dsi_phy.h" +#include "dsi.xml.h" + +#define PHY_14NM_CKLN_IDX 4 + +static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing, + int lane_idx) +{ + void __iomem *base = phy->lane_base; + bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX); + u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero; + u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare; + u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail; + u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst; + u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly; + u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln : + timing->hs_halfbyte_en; + + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx), + DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx), + halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0)); +} + +static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + u32 data; + int i; + int ret; + void __iomem *base = phy->base; + void __iomem *lane_base = phy->lane_base; + + if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + data = 0x1c; + if (phy->usecase != MSM_DSI_PHY_STANDALONE) + data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32); + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); + + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1); + + /* 4 data lanes + 1 clk lane configuration */ + for (i = 0; i < 5; i++) { + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i), + 0x1d); + + dsi_phy_write(lane_base + + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff); + dsi_phy_write(lane_base + + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i), + (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06); + + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i), + (i == PHY_14NM_CKLN_IDX) ? 
0x8f : 0x0f); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i), + 0); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i), + 0x88); + + dsi_14nm_dphy_set_timing(phy, timing, i); + } + + /* Make sure PLL is not start */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00); + + wmb(); /* make sure everything is written before reset and enable */ + + /* reset digital block */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80); + wmb(); /* ensure reset is asserted */ + udelay(100); + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00); + + msm_dsi_phy_set_src_pll(phy, src_pll_id, + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, + DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL); + + ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase); + if (ret) { + dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", + __func__, ret); + return ret; + } + + /* Remove power down from PLL and all lanes */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff); + + return 0; +} + +static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0); + dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0); + + /* ensure that the phy is completely disabled */ + wmb(); +} + +static int dsi_14nm_phy_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + + phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", + "DSI_PHY_LANE"); + if (IS_ERR(phy->lane_base)) { + dev_err(&pdev->dev, "%s: failed to map phy lane base\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = { + .type = MSM_DSI_PHY_14NM, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vcca", 17000, 32}, + }, + }, + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .init = dsi_14nm_phy_init, + }, + .io_start = { 0x994400, 0x996400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 5cd438f91afe..bc289f5c9078 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -140,6 +140,15 @@ int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) return 0; } +int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + if (pll->set_usecase) + return pll->set_usecase(pll, uc); + + return 0; +} + struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { @@ -154,6 +163,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, case MSM_DSI_PHY_28NM_8960: pll = msm_dsi_pll_28nm_8960_init(pdev, id); break; + case MSM_DSI_PHY_14NM: + pll = msm_dsi_pll_14nm_init(pdev, id); + break; default: pll = ERR_PTR(-ENXIO); break; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 2cf1664723e8..f63e7ada74a8 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -41,6 +41,8 @@ struct msm_dsi_pll { void (*destroy)(struct msm_dsi_pll *pll); void (*save_state)(struct msm_dsi_pll *pll); int (*restore_state)(struct msm_dsi_pll *pll); + int (*set_usecase)(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc); }; #define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw) @@ -104,5 +106,14 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init( } #endif +#ifdef 
CONFIG_DRM_MSM_DSI_14NM_PHY +struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id); +#else +static inline struct msm_dsi_pll * +msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c new file mode 100644 index 000000000000..fe15aa64086f --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c @@ -0,0 +1,1104 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "dsi_pll.h" +#include "dsi.xml.h" + +/* + * DSI PLL 14nm - clock diagram (eg: DSI0): + * + * dsi0n1_postdiv_clk + * | + * | + * +----+ | +----+ + * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte + * +----+ | +----+ + * | dsi0n1_postdivby2_clk + * | +----+ | + * o---| /2 |--o--|\ + * | +----+ | \ +----+ + * | | |--| n2 |-- dsi0pll + * o--------------| / +----+ + * |/ + */ + +#define POLL_MAX_READS 15 +#define POLL_TIMEOUT_US 1000 + +#define NUM_PROVIDED_CLKS 2 + +#define VCO_REF_CLK_RATE 19200000 +#define VCO_MIN_RATE 1300000000UL +#define VCO_MAX_RATE 2600000000UL + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 + +#define DSI_PLL_DEFAULT_VCO_POSTDIV 1 + +struct dsi_pll_input { + u32 fref; /* reference clk */ + u32 fdata; /* bit clock rate */ + u32 dsiclk_sel; /* Mux configuration (see diagram) */ + u32 ssc_en; /* SSC enable/disable */ + u32 ldo_en; + + /* fixed params */ + u32 refclk_dbler_en; + u32 vco_measure_time; + u32 kvco_measure_time; + u32 bandgap_timer; + u32 pll_wakeup_timer; + u32 plllock_cnt; + u32 plllock_rng; + u32 ssc_center; + u32 ssc_adj_period; + u32 ssc_spread; + u32 ssc_freq; + u32 pll_ie_trim; + u32 pll_ip_trim; + u32 pll_iptat_trim; + u32 pll_cpcset_cur; + u32 pll_cpmset_cur; + + u32 pll_icpmset; + u32 pll_icpcset; + + u32 pll_icpmset_p; + u32 pll_icpmset_m; + + u32 pll_icpcset_p; + u32 pll_icpcset_m; + + u32 pll_lpf_res1; + u32 pll_lpf_cap1; + u32 pll_lpf_cap2; + u32 pll_c3ctrl; + u32 pll_r3ctrl; +}; + +struct dsi_pll_output { + u32 pll_txclk_en; + u32 dec_start; + u32 div_frac_start; + u32 ssc_period; + u32 ssc_step_size; + u32 plllock_cmp; + u32 pll_vco_div_ref; + u32 pll_vco_count; + u32 pll_kvco_div_ref; + u32 pll_kvco_count; + u32 pll_misc1; + u32 pll_lpf2_postdiv; + u32 pll_resetsm_cntrl; + u32 pll_resetsm_cntrl2; + u32 pll_resetsm_cntrl5; + u32 pll_kvco_code; + + u32 cmn_clk_cfg0; + u32 cmn_clk_cfg1; + u32 cmn_ldo_cntrl; + + u32 pll_postdiv; + u32 fcvo; +}; + +struct pll_14nm_cached_state { + unsigned long vco_rate; + u8 n2postdiv; + u8 n1postdiv; +}; + +struct dsi_pll_14nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + + void __iomem *phy_cmn_mmio; + void __iomem *mmio; + + int vco_delay; + + struct dsi_pll_input in; + struct dsi_pll_output out; + + /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + u64 vco_current_rate; + u64 vco_ref_clk_rate; + + /* private clocks: */ + struct clk_hw *hws[NUM_DSI_CLOCKS_MAX]; + 
u32 num_hws; + + /* clock-provider: */ + struct clk_hw_onecell_data *hw_data; + + struct pll_14nm_cached_state cached_state; + + enum msm_dsi_phy_usecase uc; + struct dsi_pll_14nm *slave; +}; + +#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, base) + +/* + * Private struct for N1/N2 post-divider clocks. These clocks are similar to + * the generic clk_divider class of clocks. The only difference is that it + * also sets the slave DSI PLL's post-dividers if in Dual DSI mode + */ +struct dsi_pll_14nm_postdiv { + struct clk_hw hw; + + /* divider params */ + u8 shift; + u8 width; + u8 flags; /* same flags as used by clk_divider struct */ + + struct dsi_pll_14nm *pll; +}; + +#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) + +/* + * Global list of private DSI PLL struct pointers. We need this for Dual DSI + * mode, where the master PLL's clk_ops needs access the slave's private data + */ +static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; + +static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, + u32 nb_tries, u32 timeout_us) +{ + bool pll_locked = false; + void __iomem *base = pll_14nm->mmio; + u32 tries, val; + + tries = nb_tries; + while (tries--) { + val = pll_read(base + + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_locked = !!(val & BIT(5)); + + if (pll_locked) + break; + + udelay(timeout_us); + } + + if (!pll_locked) { + tries = nb_tries; + while (tries--) { + val = pll_read(base + + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_locked = !!(val & BIT(0)); + + if (pll_locked) + break; + + udelay(timeout_us); + } + } + + DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); + + return pll_locked; +} + +static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll) +{ + pll->in.fref = pll->vco_ref_clk_rate; + pll->in.fdata = 0; + pll->in.dsiclk_sel = 1; /* Use the /2 path in Mux */ + pll->in.ldo_en = 0; /* disabled for now */ + + /* fixed input */ + pll->in.refclk_dbler_en = 0; + pll->in.vco_measure_time = 5; + pll->in.kvco_measure_time = 5; + pll->in.bandgap_timer = 4; + pll->in.pll_wakeup_timer = 5; + pll->in.plllock_cnt = 1; + pll->in.plllock_rng = 0; + + /* + * SSC is enabled by default. We might need DT props for configuring + * some SSC params like PPM and center/down spread etc. 
+ */ + pll->in.ssc_en = 1; + pll->in.ssc_center = 0; /* down spread by default */ + pll->in.ssc_spread = 5; /* PPM / 1000 */ + pll->in.ssc_freq = 31500; /* default recommended */ + pll->in.ssc_adj_period = 37; + + pll->in.pll_ie_trim = 4; + pll->in.pll_ip_trim = 4; + pll->in.pll_cpcset_cur = 1; + pll->in.pll_cpmset_cur = 1; + pll->in.pll_icpmset = 4; + pll->in.pll_icpcset = 4; + pll->in.pll_icpmset_p = 0; + pll->in.pll_icpmset_m = 0; + pll->in.pll_icpcset_p = 0; + pll->in.pll_icpcset_m = 0; + pll->in.pll_lpf_res1 = 3; + pll->in.pll_lpf_cap1 = 11; + pll->in.pll_lpf_cap2 = 1; + pll->in.pll_iptat_trim = 7; + pll->in.pll_c3ctrl = 2; + pll->in.pll_r3ctrl = 1; +} + +#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) + +static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll) +{ + u32 period, ssc_period; + u32 ref, rem; + u64 step_size; + + DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate); + + ssc_period = pll->in.ssc_freq / 500; + period = (u32)pll->vco_ref_clk_rate / 1000; + ssc_period = CEIL(period, ssc_period); + ssc_period -= 1; + pll->out.ssc_period = ssc_period; + + DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq, + pll->in.ssc_spread, pll->out.ssc_period); + + step_size = (u32)pll->vco_current_rate; + ref = pll->vco_ref_clk_rate; + ref /= 1000; + step_size = div_u64(step_size, ref); + step_size <<= 20; + step_size = div_u64(step_size, 1000); + step_size *= pll->in.ssc_spread; + step_size = div_u64(step_size, 1000); + step_size *= (pll->in.ssc_adj_period + 1); + + rem = 0; + step_size = div_u64_rem(step_size, ssc_period + 1, &rem); + if (rem) + step_size++; + + DBG("step_size=%lld", step_size); + + step_size &= 0x0ffff; /* take lower 16 bits */ + + pll->out.ssc_step_size = step_size; +} + +static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll) +{ + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u64 multiplier = BIT(20); + u64 dec_start_multiple, dec_start, pll_comp_val; + u32 duration, div_frac_start; + u64 vco_clk_rate = pll->vco_current_rate; + u64 fref = pll->vco_ref_clk_rate; + + DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref); + + dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref); + div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); + + dec_start = div_u64(dec_start_multiple, multiplier); + + pout->dec_start = (u32)dec_start; + pout->div_frac_start = div_frac_start; + + if (pin->plllock_cnt == 0) + duration = 1024; + else if (pin->plllock_cnt == 1) + duration = 256; + else if (pin->plllock_cnt == 2) + duration = 128; + else + duration = 32; + + pll_comp_val = duration * dec_start_multiple; + pll_comp_val = div_u64(pll_comp_val, multiplier); + do_div(pll_comp_val, 10); + + pout->plllock_cmp = (u32)pll_comp_val; + + pout->pll_txclk_en = 1; + pout->cmn_ldo_cntrl = 0x3c; +} + +static u32 pll_14nm_kvco_slop(u32 vrate) +{ + u32 slop = 0; + + if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL) + slop = 600; + else if (vrate > 1800000000UL && vrate < 2300000000UL) + slop = 400; + else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE) + slop = 280; + + return slop; +} + +static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll) +{ + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u64 vco_clk_rate = pll->vco_current_rate; + u64 fref = pll->vco_ref_clk_rate; + u64 data; + u32 cnt; + + data = fref * pin->vco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 2; + pout->pll_vco_div_ref = data; + + data = div_u64(vco_clk_rate, 
1000000); /* unit is Mhz */ + data *= pin->vco_measure_time; + do_div(data, 10); + pout->pll_vco_count = data; + + data = fref * pin->kvco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 1; + pout->pll_kvco_div_ref = data; + + cnt = pll_14nm_kvco_slop(vco_clk_rate); + cnt *= 2; + cnt /= 100; + cnt *= pin->kvco_measure_time; + pout->pll_kvco_count = cnt; + + pout->pll_misc1 = 16; + pout->pll_resetsm_cntrl = 48; + pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3; + pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer; + pout->pll_kvco_code = 0; +} + +static void pll_db_commit_ssc(struct dsi_pll_14nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u8 data; + + data = pin->ssc_adj_period; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data); + data = (pin->ssc_adj_period >> 8); + data &= 0x03; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data); + + data = pout->ssc_period; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data); + data = (pout->ssc_period >> 8); + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data); + + data = pout->ssc_step_size; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data); + data = (pout->ssc_step_size >> 8); + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data); + + data = (pin->ssc_center & 0x01); + data <<= 1; + data |= 0x01; /* enable */ + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data); + + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_common(struct dsi_pll_14nm *pll, + struct dsi_pll_input *pin, + struct dsi_pll_output *pout) +{ + void __iomem *base = pll->mmio; + u8 data; + + /* confgiure the non frequency dependent pll registers */ + data = 0; + pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data); + + data = pout->pll_txclk_en; + pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data); + + data = pout->pll_resetsm_cntrl; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data); + data = pout->pll_resetsm_cntrl2; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data); + data = pout->pll_resetsm_cntrl5; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data); + + data = pout->pll_vco_div_ref & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data); + data = (pout->pll_vco_div_ref >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data); + + data = pout->pll_kvco_div_ref & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data); + data = (pout->pll_kvco_div_ref >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data); + + data = pout->pll_misc1; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data); + + data = pin->pll_ie_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data); + + data = pin->pll_ip_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data); + + data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur; + pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data); + + data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data); + + data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data); + + data = pin->pll_icpmset << 3 | pin->pll_icpcset; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data); + + data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1; + pll_write(base + 
REG_DSI_14nm_PHY_PLL_PLL_LPF1, data); + + data = pin->pll_iptat_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data); + + data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data); +} + +static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm) +{ + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + + /* de assert pll start and apply pll sw reset */ + + /* stop pll */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); + + /* pll sw reset */ + pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10); + wmb(); /* make sure register committed */ + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0); + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_14nm(struct dsi_pll_14nm *pll, + struct dsi_pll_input *pin, + struct dsi_pll_output *pout) +{ + void __iomem *base = pll->mmio; + void __iomem *cmn_base = pll->phy_cmn_mmio; + u8 data; + + DBG("DSI%d PLL", pll->id); + + data = pout->cmn_ldo_cntrl; + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); + + pll_db_commit_common(pll, pin, pout); + + pll_14nm_software_reset(pll); + + data = pin->dsiclk_sel; /* set dsiclk_sel = 1 */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data); + + data = 0xff; /* data, clk, pll normal operation */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data); + + /* configure the frequency dependent pll registers */ + data = pout->dec_start; + pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data); + + data = pout->div_frac_start & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data); + data = (pout->div_frac_start >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data); + data = (pout->div_frac_start >> 16) & 0xf; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data); + + data = pout->plllock_cmp & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data); + + data = (pout->plllock_cmp >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data); + + data = (pout->plllock_cmp >> 16) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data); + + data = pin->plllock_cnt << 1 | pin->plllock_rng << 3; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data); + + data = pout->pll_vco_count & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data); + data = (pout->pll_vco_count >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data); + + data = pout->pll_kvco_count & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data); + data = (pout->pll_kvco_count >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data); + + data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data); + + if (pin->ssc_en) + pll_db_commit_ssc(pll); + + wmb(); /* make sure register committed */ +} + +/* + * VCO clock Callbacks + */ +static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct dsi_pll_input *pin = &pll_14nm->in; + struct dsi_pll_output *pout = &pll_14nm->out; + + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate, + parent_rate); + + pll_14nm->vco_current_rate = rate; + pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; + + dsi_pll_14nm_input_init(pll_14nm); + + /* + * This configures the post divider internal to the VCO. It's + * fixed to divide by 1 for now. 
+ * + * tx_band = pll_postdiv. + * 0: divided by 1 + * 1: divided by 2 + * 2: divided by 4 + * 3: divided by 8 + */ + pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV; + + pll_14nm_dec_frac_calc(pll_14nm); + + if (pin->ssc_en) + pll_14nm_ssc_calc(pll_14nm); + + pll_14nm_calc_vco_count(pll_14nm); + + /* commit the slave DSI PLL registers if we're master. Note that we + * don't lock the slave PLL. We just ensure that the PLL/PHY registers + * of the master and slave are identical + */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + + pll_db_commit_14nm(pll_14nm_slave, pin, pout); + } + + pll_db_commit_14nm(pll_14nm, pin, pout); + + return 0; +} + +static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + u64 vco_rate, multiplier = BIT(20); + u32 div_frac_start; + u32 dec_start; + u64 ref_clk = parent_rate; + + dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START); + dec_start &= 0x0ff; + + DBG("dec_start = %x", dec_start); + + div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3) + & 0xf) << 16; + div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2) + & 0xff) << 8; + div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1) + & 0xff; + + DBG("div_frac_start = %x", div_frac_start); + + vco_rate = ref_clk * dec_start; + + vco_rate += ((ref_clk * div_frac_start) / multiplier); + + /* + * Recalculating the rate from dec_start and frac_start doesn't end up + * the rate we originally set. Convert the freq to KHz, round it up and + * convert it back to MHz. + */ + vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000; + + DBG("returning vco rate = %lu", (unsigned long)vco_rate); + + return (unsigned long)vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_14nm_vco_set_rate, + .recalc_rate = dsi_pll_14nm_vco_recalc_rate, + .prepare = msm_dsi_pll_helper_clk_prepare, + .unprepare = msm_dsi_pll_helper_clk_unprepare, +}; + +/* + * N1 and N2 post-divider clock callbacks + */ +#define div_mask(width) ((1 << (width)) - 1) +static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + void __iomem *base = pll_14nm->phy_cmn_mmio; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + u32 val; + + DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate); + + val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift; + val &= div_mask(width); + + return divider_recalc_rate(hw, parent_rate, val, NULL, + postdiv->flags); +} + +static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *prate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + + DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate); + + return divider_round_rate(hw, rate, prate, NULL, + postdiv->width, + postdiv->flags); +} + +static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + void __iomem *base = 
pll_14nm->phy_cmn_mmio; + spinlock_t *lock = &pll_14nm->postdiv_lock; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + unsigned int value; + unsigned long flags = 0; + u32 val; + + DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate, + parent_rate); + + value = divider_get_val(rate, parent_rate, NULL, postdiv->width, + postdiv->flags); + + spin_lock_irqsave(lock, flags); + + val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + val &= ~(div_mask(width) << shift); + + val |= value << shift; + pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); + + /* If we're master in dual DSI mode, then the slave PLL's post-dividers + * follow the master's post dividers + */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; + + pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); + } + + spin_unlock_irqrestore(lock, flags); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = { + .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate, + .round_rate = dsi_pll_14nm_postdiv_round_rate, + .set_rate = dsi_pll_14nm_postdiv_set_rate, +}; + +/* + * PLL Callbacks + */ + +static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + bool locked; + + DBG(""); + + pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10); + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1); + + locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS, + POLL_TIMEOUT_US); + + if (unlikely(!locked)) + dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n"); + else + DBG("DSI PLL lock success"); + + return locked ? 0 : -EINVAL; +} + +static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + + DBG(""); + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); +} + +static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + u32 data; + + data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + + cached_state->n1postdiv = data & 0xf; + cached_state->n2postdiv = (data >> 4) & 0xf; + + DBG("DSI%d PLL save state %x %x", pll_14nm->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw); +} + +static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + u32 data; + int ret; + + ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw, + cached_state->vco_rate, 0); + if (ret) { + dev_err(&pll_14nm->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; + } + + data = cached_state->n1postdiv | (cached_state->n2postdiv << 4); + + DBG("DSI%d PLL restore state %x %x", pll_14nm->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); + + /* also restore post-dividers for slave DSI PLL */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; + + pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); + } + + return 0; +} + +static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + u32 clkbuflr_en, bandgap = 0; + + switch (uc) { + case MSM_DSI_PHY_STANDALONE: + clkbuflr_en = 0x1; + break; + case MSM_DSI_PHY_MASTER: + clkbuflr_en = 0x3; + pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + clkbuflr_en = 0x0; + bandgap = 0x3; + break; + default: + return -EINVAL; + } + + pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en); + if (bandgap) + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap); + + pll_14nm->uc = uc; + + return 0; +} + +static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data; + + if (byte_clk_provider) + *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; + if (pixel_clk_provider) + *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; + + return 0; +} + +static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct platform_device *pdev = pll_14nm->pdev; + int num_hws = pll_14nm->num_hws; + + of_clk_del_provider(pdev->dev.of_node); + + while (num_hws--) + clk_hw_unregister(pll_14nm->hws[num_hws]); +} + +static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm, + const char *name, + const char *parent_name, + unsigned long flags, + u8 shift) +{ + struct dsi_pll_14nm_postdiv *pll_postdiv; + struct device *dev = &pll_14nm->pdev->dev; + struct clk_init_data postdiv_init = { + .parent_names = (const char *[]) { parent_name }, + .num_parents = 1, + .name = name, + .flags = flags, + .ops = &clk_ops_dsi_pll_14nm_postdiv, + }; + int ret; + + pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL); + if (!pll_postdiv) + return ERR_PTR(-ENOMEM); + + pll_postdiv->pll = pll_14nm; + pll_postdiv->shift = shift; + /* both N1 and N2 postdividers are 4 bits wide */ + pll_postdiv->width = 4; + /* range of each divider is from 1 to 15 */ + pll_postdiv->flags = CLK_DIVIDER_ONE_BASED; + pll_postdiv->hw.init = &postdiv_init; + + ret = clk_hw_register(dev, &pll_postdiv->hw); + if (ret) + return ERR_PTR(ret); + + return &pll_postdiv->hw; +} + +static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm) +{ + char clk_name[32], parent[32], vco_name[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .name = vco_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_14nm_vco, + }; + struct device *dev = &pll_14nm->pdev->dev; + struct clk_hw **hws = pll_14nm->hws; + struct clk_hw_onecell_data *hw_data; + struct clk_hw *hw; + int num = 0; + int ret; + + DBG("DSI%d", pll_14nm->id); + + hw_data = devm_kzalloc(dev, 
sizeof(*hw_data) + + NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id); + pll_14nm->base.clk_hw.init = &vco_init; + + ret = clk_hw_register(dev, &pll_14nm->base.clk_hw); + if (ret) + return ret; + + hws[num++] = &pll_14nm->base.clk_hw; + + snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id); + + /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */ + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, + CLK_SET_RATE_PARENT, 0); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + + /* DSI Byte clock = VCO_CLK / N1 / 8 */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + + /* + * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider + * on the way. Don't let it set parent. + */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); + + /* DSI pixel clock = VCO_CLK / N1 / 2 / N2 + * This is the output of N2 post-divider, bits 4-7 in + * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent. + */ + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; + + pll_14nm->num_hws = num; + + hw_data->num = NUM_PROVIDED_CLKS; + pll_14nm->hw_data = hw_data; + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + pll_14nm->hw_data); + if (ret) { + dev_err(dev, "failed to register clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) +{ + struct dsi_pll_14nm *pll_14nm; + struct msm_dsi_pll *pll; + int ret; + + if (!pdev) + return ERR_PTR(-ENODEV); + + pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL); + if (!pll_14nm) + return ERR_PTR(-ENOMEM); + + DBG("PLL%d", id); + + pll_14nm->pdev = pdev; + pll_14nm->id = id; + pll_14nm_list[id] = pll_14nm; + + pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) { + dev_err(&pdev->dev, "failed to map CMN PHY base\n"); + return ERR_PTR(-ENOMEM); + } + + pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_14nm->mmio)) { + dev_err(&pdev->dev, "failed to map PLL base\n"); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&pll_14nm->postdiv_lock); + + pll = &pll_14nm->base; + pll->min_rate = VCO_MIN_RATE; + pll->max_rate = VCO_MAX_RATE; + pll->get_provider = dsi_pll_14nm_get_provider; + pll->destroy = dsi_pll_14nm_destroy; + pll->disable_seq = dsi_pll_14nm_disable_seq; + pll->save_state = dsi_pll_14nm_save_state; + pll->restore_state = dsi_pll_14nm_restore_state; + pll->set_usecase = dsi_pll_14nm_set_usecase; + + pll_14nm->vco_delay = 1; + + pll->en_seq_cnt = 1; + pll->enable_seqs[0] = dsi_pll_14nm_enable_seq; + + ret = pll_14nm_register(pll_14nm); + if (ret) { + dev_err(&pdev->dev, 
"failed to register PLL: %d\n", ret); + return ERR_PTR(ret); + } + + return pll; +} -- cgit v1.2.3-55-g7522 From 21c42da18ef128ca8fb4cc4ead888f5c61e3916a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 16 Jan 2017 14:58:08 +0300 Subject: drm/msm: return -EFAULT if copy_from_user() fails copy_from_user_inatomic() is actually a local function that returns -EFAULT or positive values on error. Otherwise copy_from_user() returns the number of bytes remaining to be copied. We want to return -EFAULT here. I removed an unlikely() because we just did a copy_from_user() so I don't think it can possibly make a difference. Signed-off-by: Dan Carpenter Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/msm_gem_submit.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/msm') diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 489676568a10..1172fe7a9252 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -95,13 +95,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, */ submit->bos[i].flags = 0; - ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); - if (unlikely(ret)) { + if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) { pagefault_enable(); spin_unlock(&file->table_lock); - ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); - if (ret) + if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) { + ret = -EFAULT; goto out; + } spin_lock(&file->table_lock); pagefault_disable(); } @@ -317,9 +317,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob uint64_t iova; bool valid; - ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); - if (ret) + if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) { + ret = -EFAULT; goto out; + } if (submit_reloc.submit_offset % 4) { DRM_ERROR("non-aligned reloc offset: %u\n", -- cgit v1.2.3-55-g7522