Diffstat (limited to 'drivers/gpu/drm')
35 files changed, 806 insertions(+), 514 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 023bfdb3e63f..c04f44a90392 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -382,6 +382,8 @@ static int acp_hw_init(void *handle) adev->acp.acp_cell[0].name = "acp_audio_dma"; adev->acp.acp_cell[0].num_resources = 4; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; + adev->acp.acp_cell[0].platform_data = &adev->asic_type; + adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); adev->acp.acp_cell[1].name = "designware-i2s"; adev->acp.acp_cell[1].num_resources = 1; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index de6fc2731b98..b72f8a43d86b 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -23,36 +23,11 @@ #ifndef __AMD_SHARED_H__ #define __AMD_SHARED_H__ -#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */ +#include <drm/amd_asic_type.h> struct seq_file; -/* - * Supported ASIC types - */ -enum amd_asic_type { - CHIP_TAHITI = 0, - CHIP_PITCAIRN, - CHIP_VERDE, - CHIP_OLAND, - CHIP_HAINAN, - CHIP_BONAIRE, - CHIP_KAVERI, - CHIP_KABINI, - CHIP_HAWAII, - CHIP_MULLINS, - CHIP_TOPAZ, - CHIP_TONGA, - CHIP_FIJI, - CHIP_CARRIZO, - CHIP_STONEY, - CHIP_POLARIS10, - CHIP_POLARIS11, - CHIP_POLARIS12, - CHIP_VEGA10, - CHIP_RAVEN, - CHIP_LAST, -}; +#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */ /* * Chip flags diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 2affe53f3fda..279c1035c12d 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -681,6 +681,7 @@ EXPORT_SYMBOL(drm_framebuffer_init); /** * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference * @dev: drm device + * @file_priv: drm file to check for lease against. * @id: id of the fb object * * If successful, this grabs an additional reference to the framebuffer - diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index 7c8b2698c6a7..ce4d2fb32810 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -151,6 +151,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, /** * drm_mode_object_find - look up a drm object with static lifetime + * @dev: drm device * @file_priv: drm file * @id: id of the mode object * @type: type of the mode object diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 305dc3d4ff77..5a7c9d8abd6b 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -3,6 +3,7 @@ config DRM_EXYNOS depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER select VIDEOMODE_HELPERS + select SND_SOC_HDMI_CODEC if SND_SOC help Choose this option if you have a Samsung SoC EXYNOS chipset. If M is selected the module will be called exynosdrm. 
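[Note: the amdgpu_acp.c hunk above hands the ASIC type to the "acp_audio_dma" MFD cell through platform_data/pdata_size. The following is a minimal, hypothetical consumer sketch, not part of this patch: the driver and function names are illustrative, and only the dev_get_platdata() usage reflects how a child platform driver could read the value set by acp_hw_init().]

/* Hypothetical child-driver probe; illustrates reading the platform_data set above. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/amd_asic_type.h>

static int acp_dma_example_probe(struct platform_device *pdev)
{
	/* acp_hw_init() set pdata_size = sizeof(enum amd_asic_type) */
	enum amd_asic_type *asic_type = dev_get_platdata(&pdev->dev);

	if (!asic_type)
		return -ENODEV;

	dev_info(&pdev->dev, "ACP DMA probed for ASIC type %d\n", *asic_type);
	return 0;
}

static struct platform_driver acp_dma_example_driver = {
	.probe = acp_dma_example_probe,
	.driver = {
		.name = "acp_audio_dma", /* matches the MFD cell name registered above */
	},
};
module_platform_driver(acp_dma_example_driver);
MODULE_LICENSE("GPL");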
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 6ce0821590df..dc01342e759a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -95,8 +95,23 @@ static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc, return MODE_OK; } +static bool exynos_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + + if (exynos_crtc->ops->mode_fixup) + return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, + adjusted_mode); + + return true; +} + + static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .mode_valid = exynos_crtc_mode_valid, + .mode_fixup = exynos_crtc_mode_fixup, .atomic_check = exynos_crtc_atomic_check, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index f8bae4cb4823..c6847fa708fa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -136,6 +136,9 @@ struct exynos_drm_crtc_ops { u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc); enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, const struct drm_display_mode *mode); + bool (*mode_fixup)(struct exynos_drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); int (*atomic_check)(struct exynos_drm_crtc *crtc, struct drm_crtc_state *state); void (*atomic_begin)(struct exynos_drm_crtc *crtc); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 0109ff40b1db..82d1b7e2febe 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -40,7 +40,7 @@ #include <linux/component.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> - +#include <sound/hdmi-codec.h> #include <drm/exynos_drm.h> #include <media/cec-notifier.h> @@ -111,15 +111,20 @@ struct hdmi_driver_data { struct string_array_spec clk_muxes; }; +struct hdmi_audio { + struct platform_device *pdev; + struct hdmi_audio_infoframe infoframe; + struct hdmi_codec_params params; + bool mute; +}; + struct hdmi_context { struct drm_encoder encoder; struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; - bool powered; bool dvi_mode; struct delayed_work hotplug_work; - struct drm_display_mode current_mode; struct cec_notifier *notifier; const struct hdmi_driver_data *drv_data; @@ -137,6 +142,11 @@ struct hdmi_context { struct regulator *reg_hdmi_en; struct exynos_drm_clk phy_clk; struct drm_bridge *bridge; + + /* mutex protecting subsequent fields below */ + struct mutex mutex; + struct hdmi_audio audio; + bool powered; }; static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e) @@ -298,6 +308,15 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = { }, }, { + .pixel_clock = 85500000, + .conf = { + 0x01, 0xd1, 0x24, 0x11, 0x40, 0x40, 0xd0, 0x08, + 0x84, 0xa0, 0xd6, 0xd8, 0x45, 0xa0, 0xac, 0x80, + 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, + 0x54, 0x90, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, + }, + }, + { .pixel_clock = 106500000, .conf = { 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08, @@ -768,8 +787,25 @@ static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy) return ret; } +static int hdmi_audio_infoframe_apply(struct hdmi_context *hdata) +{ + struct 
hdmi_audio_infoframe *infoframe = &hdata->audio.infoframe; + u8 buf[HDMI_INFOFRAME_SIZE(AUDIO)]; + int len; + + len = hdmi_audio_infoframe_pack(infoframe, buf, sizeof(buf)); + if (len < 0) + return len; + + hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC); + hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, len); + + return 0; +} + static void hdmi_reg_infoframes(struct hdmi_context *hdata) { + struct drm_display_mode *m = &hdata->encoder.crtc->state->mode; union hdmi_infoframe frm; u8 buf[25]; int ret; @@ -783,8 +819,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata) return; } - ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, - &hdata->current_mode, false); + ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, m, false); if (!ret) ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf)); if (ret > 0) { @@ -794,8 +829,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata) DRM_INFO("%s: invalid AVI infoframe (%d)\n", __func__, ret); } - ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi, - &hdata->current_mode); + ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi, m); if (!ret) ret = hdmi_vendor_infoframe_pack(&frm.vendor.hdmi, buf, sizeof(buf)); @@ -805,15 +839,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata) hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3); } - ret = hdmi_audio_infoframe_init(&frm.audio); - if (!ret) { - frm.audio.channels = 2; - ret = hdmi_audio_infoframe_pack(&frm.audio, buf, sizeof(buf)); - } - if (ret > 0) { - hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC); - hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, ret); - } + hdmi_audio_infoframe_apply(hdata); } static enum drm_connector_status hdmi_detect(struct drm_connector *connector, @@ -1003,23 +1029,18 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u32 freq) hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); } -static void hdmi_audio_init(struct hdmi_context *hdata) +static void hdmi_audio_config(struct hdmi_context *hdata) { - u32 sample_rate, bits_per_sample; - u32 data_num, bit_ch, sample_frq; - u32 val; - - sample_rate = 44100; - bits_per_sample = 16; + u32 bit_ch = 1; + u32 data_num, val; + int i; - switch (bits_per_sample) { + switch (hdata->audio.params.sample_width) { case 20: data_num = 2; - bit_ch = 1; break; case 24: data_num = 3; - bit_ch = 1; break; default: data_num = 1; @@ -1027,7 +1048,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata) break; } - hdmi_reg_acr(hdata, sample_rate); + hdmi_reg_acr(hdata, hdata->audio.params.sample_rate); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE @@ -1037,12 +1058,6 @@ static void hdmi_audio_init(struct hdmi_context *hdata) | HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN); - - sample_frq = (sample_rate == 44100) ? 0 : - (sample_rate == 48000) ? 2 : - (sample_rate == 32000) ? 3 : - (sample_rate == 96000) ? 
0xa : 0x0; - hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS); hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN); @@ -1066,39 +1081,33 @@ static void hdmi_audio_init(struct hdmi_context *hdata) | HDMI_I2S_SET_SDATA_BIT(data_num) | HDMI_I2S_BASIC_FORMAT); - /* Configure register related to CUV information */ - hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0 - | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH - | HDMI_I2S_COPYRIGHT - | HDMI_I2S_LINEAR_PCM - | HDMI_I2S_CONSUMER_FORMAT); - hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER); - hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0)); - hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2 - | HDMI_I2S_SET_SMP_FREQ(sample_frq)); - hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4, - HDMI_I2S_ORG_SMP_FREQ_44_1 - | HDMI_I2S_WORD_LEN_MAX24_24BITS - | HDMI_I2S_WORD_LEN_MAX_24BITS); + /* Configuration of the audio channel status registers */ + for (i = 0; i < HDMI_I2S_CH_ST_MAXNUM; i++) + hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST(i), + hdata->audio.params.iec.status[i]); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD); } -static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff) +static void hdmi_audio_control(struct hdmi_context *hdata) { + bool enable = !hdata->audio.mute; + if (hdata->dvi_mode) return; - hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0); - hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ? + hdmi_reg_writeb(hdata, HDMI_AUI_CON, enable ? + HDMI_AVI_CON_EVERY_VSYNC : HDMI_AUI_CON_NO_TRAN); + hdmi_reg_writemask(hdata, HDMI_CON_0, enable ? HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK); } static void hdmi_start(struct hdmi_context *hdata, bool start) { + struct drm_display_mode *m = &hdata->encoder.crtc->state->mode; u32 val = start ? HDMI_TG_EN : 0; - if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE) + if (m->flags & DRM_MODE_FLAG_INTERLACE) val |= HDMI_FIELD_EN; hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN); @@ -1168,7 +1177,7 @@ static void hdmiphy_wait_for_pll(struct hdmi_context *hdata) static void hdmi_v13_mode_apply(struct hdmi_context *hdata) { - struct drm_display_mode *m = &hdata->current_mode; + struct drm_display_mode *m = &hdata->encoder.crtc->state->mode; unsigned int val; hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay); @@ -1247,7 +1256,19 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata) static void hdmi_v14_mode_apply(struct hdmi_context *hdata) { - struct drm_display_mode *m = &hdata->current_mode; + struct drm_display_mode *m = &hdata->encoder.crtc->state->mode; + struct drm_display_mode *am = + &hdata->encoder.crtc->state->adjusted_mode; + int hquirk = 0; + + /* + * In case video mode coming from CRTC differs from requested one HDMI + * sometimes is able to almost properly perform conversion - only + * first line is distorted. 
+ */ + if ((m->vdisplay != am->vdisplay) && + (m->hdisplay == 1280 || m->hdisplay == 1024 || m->hdisplay == 1366)) + hquirk = 258; hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay); hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal); @@ -1341,8 +1362,9 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata) hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff); hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal); - hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay); - hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay); + hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, + m->htotal - m->hdisplay - hquirk); + hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay + hquirk); hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal); if (hdata->drv_data == &exynos5433_hdmi_driver_data) hdmi_reg_writeb(hdata, HDMI_TG_DECON_EN, 1); @@ -1380,10 +1402,11 @@ static void hdmiphy_enable_mode_set(struct hdmi_context *hdata, bool enable) static void hdmiphy_conf_apply(struct hdmi_context *hdata) { + struct drm_display_mode *m = &hdata->encoder.crtc->state->mode; int ret; const u8 *phy_conf; - ret = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000); + ret = hdmi_find_phy_conf(hdata, m->clock * 1000); if (ret < 0) { DRM_ERROR("failed to find hdmiphy conf\n"); return; @@ -1406,28 +1429,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata) hdmiphy_wait_for_pll(hdata); } +/* Should be called with hdata->mutex mutex held */ static void hdmi_conf_apply(struct hdmi_context *hdata) { hdmi_start(hdata, false); hdmi_conf_init(hdata); - hdmi_audio_init(hdata); + hdmi_audio_config(hdata); hdmi_mode_apply(hdata); - hdmi_audio_control(hdata, true); -} - -static void hdmi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct hdmi_context *hdata = encoder_to_hdmi(encoder); - struct drm_display_mode *m = adjusted_mode; - - DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n", - m->hdisplay, m->vdisplay, - m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? - "INTERLACED" : "PROGRESSIVE"); - - drm_mode_copy(&hdata->current_mode, m); + hdmi_audio_control(hdata); } static void hdmi_set_refclk(struct hdmi_context *hdata, bool on) @@ -1439,6 +1448,7 @@ static void hdmi_set_refclk(struct hdmi_context *hdata, bool on) SYSREG_HDMI_REFCLK_INT_CLK, on ? ~0 : 0); } +/* Should be called with hdata->mutex mutex held. */ static void hdmiphy_enable(struct hdmi_context *hdata) { if (hdata->powered) @@ -1461,6 +1471,7 @@ static void hdmiphy_enable(struct hdmi_context *hdata) hdata->powered = true; } +/* Should be called with hdata->mutex mutex held. */ static void hdmiphy_disable(struct hdmi_context *hdata) { if (!hdata->powered) @@ -1486,33 +1497,42 @@ static void hdmi_enable(struct drm_encoder *encoder) { struct hdmi_context *hdata = encoder_to_hdmi(encoder); + mutex_lock(&hdata->mutex); + hdmiphy_enable(hdata); hdmi_conf_apply(hdata); + + mutex_unlock(&hdata->mutex); } static void hdmi_disable(struct drm_encoder *encoder) { struct hdmi_context *hdata = encoder_to_hdmi(encoder); - if (!hdata->powered) + mutex_lock(&hdata->mutex); + + if (hdata->powered) { + /* + * The SFRs of VP and Mixer are updated by Vertical Sync of + * Timing generator which is a part of HDMI so the sequence + * to disable TV Subsystem should be as following, + * VP -> Mixer -> HDMI + * + * To achieve such sequence HDMI is disabled together with + * HDMI PHY, via pipe clock callback. 
+ */ + mutex_unlock(&hdata->mutex); + cancel_delayed_work(&hdata->hotplug_work); + cec_notifier_set_phys_addr(hdata->notifier, + CEC_PHYS_ADDR_INVALID); return; + } - /* - * The SFRs of VP and Mixer are updated by Vertical Sync of - * Timing generator which is a part of HDMI so the sequence - * to disable TV Subsystem should be as following, - * VP -> Mixer -> HDMI - * - * To achieve such sequence HDMI is disabled together with HDMI PHY, via - * pipe clock callback. - */ - cancel_delayed_work(&hdata->hotplug_work); - cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); + mutex_unlock(&hdata->mutex); } static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { .mode_fixup = hdmi_mode_fixup, - .mode_set = hdmi_mode_set, .enable = hdmi_enable, .disable = hdmi_disable, }; @@ -1521,6 +1541,99 @@ static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; +static void hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct hdmi_context *hdata = dev_get_drvdata(dev); + + mutex_lock(&hdata->mutex); + + hdata->audio.mute = true; + + if (hdata->powered) + hdmi_audio_control(hdata); + + mutex_unlock(&hdata->mutex); +} + +static int hdmi_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct hdmi_context *hdata = dev_get_drvdata(dev); + + if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv || + daifmt->frame_clk_inv || daifmt->bit_clk_master || + daifmt->frame_clk_master) { + dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__, + daifmt->bit_clk_inv, daifmt->frame_clk_inv, + daifmt->bit_clk_master, + daifmt->frame_clk_master); + return -EINVAL; + } + + mutex_lock(&hdata->mutex); + + hdata->audio.params = *params; + + if (hdata->powered) { + hdmi_audio_config(hdata); + hdmi_audio_infoframe_apply(hdata); + } + + mutex_unlock(&hdata->mutex); + + return 0; +} + +static int hdmi_audio_digital_mute(struct device *dev, void *data, bool mute) +{ + struct hdmi_context *hdata = dev_get_drvdata(dev); + + mutex_lock(&hdata->mutex); + + hdata->audio.mute = mute; + + if (hdata->powered) + hdmi_audio_control(hdata); + + mutex_unlock(&hdata->mutex); + + return 0; +} + +static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, + size_t len) +{ + struct hdmi_context *hdata = dev_get_drvdata(dev); + struct drm_connector *connector = &hdata->connector; + + memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); + + return 0; +} + +static const struct hdmi_codec_ops audio_codec_ops = { + .hw_params = hdmi_audio_hw_params, + .audio_shutdown = hdmi_audio_shutdown, + .digital_mute = hdmi_audio_digital_mute, + .get_eld = hdmi_audio_get_eld, +}; + +static int hdmi_register_audio_device(struct hdmi_context *hdata) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &audio_codec_ops, + .max_i2s_channels = 6, + .i2s = 1, + }; + + hdata->audio.pdev = platform_device_register_data( + hdata->dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(hdata->audio.pdev); +} + static void hdmi_hotplug_work_func(struct work_struct *work) { struct hdmi_context *hdata; @@ -1596,11 +1709,14 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable) { struct hdmi_context *hdata = container_of(clk, struct hdmi_context, phy_clk); + mutex_lock(&hdata->mutex); if (enable) hdmiphy_enable(hdata); else hdmiphy_disable(hdata); + + mutex_unlock(&hdata->mutex); } static int 
hdmi_bridge_init(struct hdmi_context *hdata) @@ -1811,6 +1927,7 @@ out: static int hdmi_probe(struct platform_device *pdev) { + struct hdmi_audio_infoframe *audio_infoframe; struct device *dev = &pdev->dev; struct hdmi_context *hdata; struct resource *res; @@ -1826,6 +1943,8 @@ static int hdmi_probe(struct platform_device *pdev) hdata->dev = dev; + mutex_init(&hdata->mutex); + ret = hdmi_resources_init(hdata); if (ret) { if (ret != -EPROBE_DEFER) @@ -1885,12 +2004,26 @@ static int hdmi_probe(struct platform_device *pdev) pm_runtime_enable(dev); - ret = component_add(&pdev->dev, &hdmi_component_ops); + audio_infoframe = &hdata->audio.infoframe; + hdmi_audio_infoframe_init(audio_infoframe); + audio_infoframe->coding_type = HDMI_AUDIO_CODING_TYPE_STREAM; + audio_infoframe->sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM; + audio_infoframe->sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM; + audio_infoframe->channels = 2; + + ret = hdmi_register_audio_device(hdata); if (ret) goto err_notifier_put; + ret = component_add(&pdev->dev, &hdmi_component_ops); + if (ret) + goto err_unregister_audio; + return ret; +err_unregister_audio: + platform_device_unregister(hdata->audio.pdev); + err_notifier_put: cec_notifier_put(hdata->notifier); pm_runtime_disable(dev); @@ -1914,6 +2047,7 @@ static int hdmi_remove(struct platform_device *pdev) cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); component_del(&pdev->dev, &hdmi_component_ops); + platform_device_unregister(hdata->audio.pdev); cec_notifier_put(hdata->notifier); pm_runtime_disable(&pdev->dev); @@ -1929,6 +2063,8 @@ static int hdmi_remove(struct platform_device *pdev) put_device(&hdata->ddc_adpt->dev); + mutex_destroy(&hdata->mutex); + return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 002755415e00..dc5d79465f9b 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -67,19 +67,6 @@ #define MXR_FORMAT_ARGB4444 6 #define MXR_FORMAT_ARGB8888 7 -struct mixer_resources { - int irq; - void __iomem *mixer_regs; - void __iomem *vp_regs; - spinlock_t reg_slock; - struct clk *mixer; - struct clk *vp; - struct clk *hdmi; - struct clk *sclk_mixer; - struct clk *sclk_hdmi; - struct clk *mout_mixer; -}; - enum mixer_version_id { MXR_VER_0_0_0_16, MXR_VER_16_0_33_0, @@ -117,8 +104,18 @@ struct mixer_context { struct exynos_drm_plane planes[MIXER_WIN_NR]; unsigned long flags; - struct mixer_resources mixer_res; + int irq; + void __iomem *mixer_regs; + void __iomem *vp_regs; + spinlock_t reg_slock; + struct clk *mixer; + struct clk *vp; + struct clk *hdmi; + struct clk *sclk_mixer; + struct clk *sclk_hdmi; + struct clk *mout_mixer; enum mixer_version_id mxr_ver; + int scan_value; }; struct mixer_drv_data { @@ -194,44 +191,44 @@ static inline bool is_alpha_format(unsigned int pixel_format) } } -static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id) +static inline u32 vp_reg_read(struct mixer_context *ctx, u32 reg_id) { - return readl(res->vp_regs + reg_id); + return readl(ctx->vp_regs + reg_id); } -static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id, +static inline void vp_reg_write(struct mixer_context *ctx, u32 reg_id, u32 val) { - writel(val, res->vp_regs + reg_id); + writel(val, ctx->vp_regs + reg_id); } -static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id, +static inline void vp_reg_writemask(struct mixer_context *ctx, u32 reg_id, u32 val, u32 mask) { - u32 old = vp_reg_read(res, 
reg_id); + u32 old = vp_reg_read(ctx, reg_id); val = (val & mask) | (old & ~mask); - writel(val, res->vp_regs + reg_id); + writel(val, ctx->vp_regs + reg_id); } -static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id) +static inline u32 mixer_reg_read(struct mixer_context *ctx, u32 reg_id) { - return readl(res->mixer_regs + reg_id); + return readl(ctx->mixer_regs + reg_id); } -static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id, +static inline void mixer_reg_write(struct mixer_context *ctx, u32 reg_id, u32 val) { - writel(val, res->mixer_regs + reg_id); + writel(val, ctx->mixer_regs + reg_id); } -static inline void mixer_reg_writemask(struct mixer_resources *res, +static inline void mixer_reg_writemask(struct mixer_context *ctx, u32 reg_id, u32 val, u32 mask) { - u32 old = mixer_reg_read(res, reg_id); + u32 old = mixer_reg_read(ctx, reg_id); val = (val & mask) | (old & ~mask); - writel(val, res->mixer_regs + reg_id); + writel(val, ctx->mixer_regs + reg_id); } static void mixer_regs_dump(struct mixer_context *ctx) @@ -239,7 +236,7 @@ static void mixer_regs_dump(struct mixer_context *ctx) #define DUMPREG(reg_id) \ do { \ DRM_DEBUG_KMS(#reg_id " = %08x\n", \ - (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \ + (u32)readl(ctx->mixer_regs + reg_id)); \ } while (0) DUMPREG(MXR_STATUS); @@ -271,7 +268,7 @@ static void vp_regs_dump(struct mixer_context *ctx) #define DUMPREG(reg_id) \ do { \ DRM_DEBUG_KMS(#reg_id " = %08x\n", \ - (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \ + (u32) readl(ctx->vp_regs + reg_id)); \ } while (0) DUMPREG(VP_ENABLE); @@ -301,7 +298,7 @@ do { \ #undef DUMPREG } -static inline void vp_filter_set(struct mixer_resources *res, +static inline void vp_filter_set(struct mixer_context *ctx, int reg_id, const u8 *data, unsigned int size) { /* assure 4-byte align */ @@ -309,24 +306,23 @@ static inline void vp_filter_set(struct mixer_resources *res, for (; size; size -= 4, reg_id += 4, data += 4) { u32 val = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; - vp_reg_write(res, reg_id, val); + vp_reg_write(ctx, reg_id, val); } } -static void vp_default_filter(struct mixer_resources *res) +static void vp_default_filter(struct mixer_context *ctx) { - vp_filter_set(res, VP_POLY8_Y0_LL, + vp_filter_set(ctx, VP_POLY8_Y0_LL, filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8)); - vp_filter_set(res, VP_POLY4_Y0_LL, + vp_filter_set(ctx, VP_POLY4_Y0_LL, filter_y_vert_tap4, sizeof(filter_y_vert_tap4)); - vp_filter_set(res, VP_POLY4_C0_LL, + vp_filter_set(ctx, VP_POLY4_C0_LL, filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4)); } static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win, bool alpha) { - struct mixer_resources *res = &ctx->mixer_res; u32 val; val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ @@ -335,13 +331,12 @@ static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win, val |= MXR_GRP_CFG_BLEND_PRE_MUL; val |= MXR_GRP_CFG_PIXEL_BLEND_EN; } - mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win), + mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win), val, MXR_GRP_CFG_MISC_MASK); } static void mixer_cfg_vp_blend(struct mixer_context *ctx) { - struct mixer_resources *res = &ctx->mixer_res; u32 val; /* @@ -351,51 +346,39 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx) * support blending of the video layer through this. 
*/ val = 0; - mixer_reg_write(res, MXR_VIDEO_CFG, val); + mixer_reg_write(ctx, MXR_VIDEO_CFG, val); } static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) { - struct mixer_resources *res = &ctx->mixer_res; - /* block update on vsync */ - mixer_reg_writemask(res, MXR_STATUS, enable ? + mixer_reg_writemask(ctx, MXR_STATUS, enable ? MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) - vp_reg_write(res, VP_SHADOW_UPDATE, enable ? + vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? VP_SHADOW_UPDATE_ENABLE : 0); } -static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height) +static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) { - struct mixer_resources *res = &ctx->mixer_res; u32 val; /* choosing between interlace and progressive mode */ val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE; - if (ctx->mxr_ver != MXR_VER_128_0_0_184) { - /* choosing between proper HD and SD mode */ - if (height <= 480) - val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD; - else if (height <= 576) - val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD; - else if (height <= 720) - val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD; - else if (height <= 1080) - val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD; - else - val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD; - } + if (ctx->mxr_ver == MXR_VER_128_0_0_184) + mixer_reg_write(ctx, MXR_RESOLUTION, + MXR_MXR_RES_HEIGHT(height) | MXR_MXR_RES_WIDTH(width)); + else + val |= ctx->scan_value; - mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK); + mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK); } static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height) { - struct mixer_resources *res = &ctx->mixer_res; u32 val; switch (height) { @@ -408,45 +391,44 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height) default: val = MXR_CFG_RGB709_16_235; /* Configure the BT.709 CSC matrix for full range RGB. */ - mixer_reg_write(res, MXR_CM_COEFF_Y, + mixer_reg_write(ctx, MXR_CM_COEFF_Y, MXR_CSC_CT( 0.184, 0.614, 0.063) | MXR_CM_COEFF_RGB_FULL); - mixer_reg_write(res, MXR_CM_COEFF_CB, + mixer_reg_write(ctx, MXR_CM_COEFF_CB, MXR_CSC_CT(-0.102, -0.338, 0.440)); - mixer_reg_write(res, MXR_CM_COEFF_CR, + mixer_reg_write(ctx, MXR_CM_COEFF_CR, MXR_CSC_CT( 0.440, -0.399, -0.040)); break; } - mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK); + mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK); } static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, unsigned int priority, bool enable) { - struct mixer_resources *res = &ctx->mixer_res; u32 val = enable ? 
~0 : 0; switch (win) { case 0: - mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE); - mixer_reg_writemask(res, MXR_LAYER_CFG, + mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP0_ENABLE); + mixer_reg_writemask(ctx, MXR_LAYER_CFG, MXR_LAYER_CFG_GRP0_VAL(priority), MXR_LAYER_CFG_GRP0_MASK); break; case 1: - mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); - mixer_reg_writemask(res, MXR_LAYER_CFG, + mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); + mixer_reg_writemask(ctx, MXR_LAYER_CFG, MXR_LAYER_CFG_GRP1_VAL(priority), MXR_LAYER_CFG_GRP1_MASK); break; case VP_DEFAULT_WIN: if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { - vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); - mixer_reg_writemask(res, MXR_CFG, val, + vp_reg_writemask(ctx, VP_ENABLE, val, VP_ENABLE_ON); + mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_VP_ENABLE); - mixer_reg_writemask(res, MXR_LAYER_CFG, + mixer_reg_writemask(ctx, MXR_LAYER_CFG, MXR_LAYER_CFG_VP_VAL(priority), MXR_LAYER_CFG_VP_MASK); } @@ -456,30 +438,34 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, static void mixer_run(struct mixer_context *ctx) { - struct mixer_resources *res = &ctx->mixer_res; - - mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN); + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_REG_RUN); } static void mixer_stop(struct mixer_context *ctx) { - struct mixer_resources *res = &ctx->mixer_res; int timeout = 20; - mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN); + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_REG_RUN); - while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) && + while (!(mixer_reg_read(ctx, MXR_STATUS) & MXR_STATUS_REG_IDLE) && --timeout) usleep_range(10000, 12000); } +static void mixer_commit(struct mixer_context *ctx) +{ + struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode; + + mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay); + mixer_cfg_rgb_fmt(ctx, mode->vdisplay); + mixer_run(ctx); +} + static void vp_video_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { struct exynos_drm_plane_state *state = to_exynos_plane_state(plane->base.state); - struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode; - struct mixer_resources *res = &ctx->mixer_res; struct drm_framebuffer *fb = state->base.fb; unsigned int priority = state->base.normalized_zpos + 1; unsigned long flags; @@ -493,8 +479,7 @@ static void vp_video_buffer(struct mixer_context *ctx, luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0); chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1); - if (mode->flags & DRM_MODE_FLAG_INTERLACE) { - __set_bit(MXR_BIT_INTERLACE, &ctx->flags); + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { if (is_tiled) { luma_addr[1] = luma_addr[0] + 0x40; chroma_addr[1] = chroma_addr[0] + 0x40; @@ -503,63 +488,59 @@ static void vp_video_buffer(struct mixer_context *ctx, chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; } } else { - __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); luma_addr[1] = 0; chroma_addr[1] = 0; } - spin_lock_irqsave(&res->reg_slock, flags); + spin_lock_irqsave(&ctx->reg_slock, flags); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); - vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP); + vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); /* setup format */ val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12); val |= (is_tiled ? 
VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR); - vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); + vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_FMT_MASK); /* setting size of input image */ - vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | + vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | VP_IMG_VSIZE(fb->height)); /* chroma plane for NV12/NV21 is half the height of the luma plane */ - vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | + vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | VP_IMG_VSIZE(fb->height / 2)); - vp_reg_write(res, VP_SRC_WIDTH, state->src.w); - vp_reg_write(res, VP_SRC_HEIGHT, state->src.h); - vp_reg_write(res, VP_SRC_H_POSITION, + vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w); + vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); + vp_reg_write(ctx, VP_SRC_H_POSITION, VP_SRC_H_POSITION_VAL(state->src.x)); - vp_reg_write(res, VP_SRC_V_POSITION, state->src.y); + vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); - vp_reg_write(res, VP_DST_WIDTH, state->crtc.w); - vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x); + vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w); + vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x); if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { - vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2); - vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2); + vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); + vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2); } else { - vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h); - vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y); + vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); + vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y); } - vp_reg_write(res, VP_H_RATIO, state->h_ratio); - vp_reg_write(res, VP_V_RATIO, state->v_ratio); + vp_reg_write(ctx, VP_H_RATIO, state->h_ratio); + vp_reg_write(ctx, VP_V_RATIO, state->v_ratio); - vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE); + vp_reg_write(ctx, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE); /* set buffer address to vp */ - vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]); - vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]); - vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]); - vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]); + vp_reg_write(ctx, VP_TOP_Y_PTR, luma_addr[0]); + vp_reg_write(ctx, VP_BOT_Y_PTR, luma_addr[1]); + vp_reg_write(ctx, VP_TOP_C_PTR, chroma_addr[0]); + vp_reg_write(ctx, VP_BOT_C_PTR, chroma_addr[1]); - mixer_cfg_scan(ctx, mode->vdisplay); - mixer_cfg_rgb_fmt(ctx, mode->vdisplay); mixer_cfg_layer(ctx, plane->index, priority, true); mixer_cfg_vp_blend(ctx); - mixer_run(ctx); - spin_unlock_irqrestore(&res->reg_slock, flags); + spin_unlock_irqrestore(&ctx->reg_slock, flags); mixer_regs_dump(ctx); vp_regs_dump(ctx); @@ -567,9 +548,7 @@ static void vp_video_buffer(struct mixer_context *ctx, static void mixer_layer_update(struct mixer_context *ctx) { - struct mixer_resources *res = &ctx->mixer_res; - - mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); } static void mixer_graph_buffer(struct mixer_context *ctx, @@ -577,8 +556,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, { struct exynos_drm_plane_state *state = to_exynos_plane_state(plane->base.state); - struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode; - struct mixer_resources *res = &ctx->mixer_res; struct drm_framebuffer *fb = state->base.fb; unsigned int priority = state->base.normalized_zpos + 1; unsigned long 
flags; @@ -623,45 +600,30 @@ static void mixer_graph_buffer(struct mixer_context *ctx, + (state->src.x * fb->format->cpp[0]) + (state->src.y * fb->pitches[0]); - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - __set_bit(MXR_BIT_INTERLACE, &ctx->flags); - else - __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); - - spin_lock_irqsave(&res->reg_slock, flags); + spin_lock_irqsave(&ctx->reg_slock, flags); /* setup format */ - mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win), + mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win), MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); /* setup geometry */ - mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), + mixer_reg_write(ctx, MXR_GRAPHIC_SPAN(win), fb->pitches[0] / fb->format->cpp[0]); - /* setup display size */ - if (ctx->mxr_ver == MXR_VER_128_0_0_184 && - win == DEFAULT_WIN) { - val = MXR_MXR_RES_HEIGHT(mode->vdisplay); - val |= MXR_MXR_RES_WIDTH(mode->hdisplay); - mixer_reg_write(res, MXR_RESOLUTION, val); - } - val = MXR_GRP_WH_WIDTH(state->src.w); val |= MXR_GRP_WH_HEIGHT(state->src.h); val |= MXR_GRP_WH_H_SCALE(x_ratio); val |= MXR_GRP_WH_V_SCALE(y_ratio); - mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); + mixer_reg_write(ctx, MXR_GRAPHIC_WH(win), val); /* setup offsets in display image */ val = MXR_GRP_DXY_DX(dst_x_offset); val |= MXR_GRP_DXY_DY(dst_y_offset); - mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val); + mixer_reg_write(ctx, MXR_GRAPHIC_DXY(win), val); /* set buffer address to mixer */ - mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr); + mixer_reg_write(ctx, MXR_GRAPHIC_BASE(win), dma_addr); - mixer_cfg_scan(ctx, mode->vdisplay); - mixer_cfg_rgb_fmt(ctx, mode->vdisplay); mixer_cfg_layer(ctx, win, priority, true); mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->format->format)); @@ -670,22 +632,19 @@ static void mixer_graph_buffer(struct mixer_context *ctx, ctx->mxr_ver == MXR_VER_128_0_0_184) mixer_layer_update(ctx); - mixer_run(ctx); - - spin_unlock_irqrestore(&res->reg_slock, flags); + spin_unlock_irqrestore(&ctx->reg_slock, flags); mixer_regs_dump(ctx); } static void vp_win_reset(struct mixer_context *ctx) { - struct mixer_resources *res = &ctx->mixer_res; unsigned int tries = 100; - vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING); + vp_reg_write(ctx, VP_SRESET, VP_SRESET_PROCESSING); while (--tries) { /* waiting until VP_SRESET_PROCESSING is 0 */ - if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) + if (~vp_reg_read(ctx, VP_SRESET) & VP_SRESET_PROCESSING) break; mdelay(10); } @@ -694,57 +653,55 @@ static void vp_win_reset(struct mixer_context *ctx) static void mixer_win_reset(struct mixer_context *ctx) { - struct mixer_resources *res = &ctx->mixer_res; unsigned long flags; - spin_lock_irqsave(&res->reg_slock, flags); + spin_lock_irqsave(&ctx->reg_slock, flags); - mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK); + mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK); /* set output in RGB888 mode */ - mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK); + mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK); /* 16 beat burst in DMA */ - mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST, + mixer_reg_writemask(ctx, MXR_STATUS, MXR_STATUS_16_BURST, MXR_STATUS_BURST_MASK); /* reset default layer priority */ - mixer_reg_write(res, MXR_LAYER_CFG, 0); + mixer_reg_write(ctx, MXR_LAYER_CFG, 0); /* set all background colors to RGB (0,0,0) */ - mixer_reg_write(res, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128)); - mixer_reg_write(res, MXR_BG_COLOR1, 
MXR_YCBCR_VAL(0, 128, 128)); - mixer_reg_write(res, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128)); + mixer_reg_write(ctx, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128)); + mixer_reg_write(ctx, MXR_BG_COLOR1, MXR_YCBCR_VAL(0, 128, 128)); + mixer_reg_write(ctx, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128)); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { /* configuration of Video Processor Registers */ vp_win_reset(ctx); - vp_default_filter(res); + vp_default_filter(ctx); } /* disable all layers */ - mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); - mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); + mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); + mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) - mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); + mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_VP_ENABLE); /* set all source image offsets to zero */ - mixer_reg_write(res, MXR_GRAPHIC_SXY(0), 0); - mixer_reg_write(res, MXR_GRAPHIC_SXY(1), 0); + mixer_reg_write(ctx, MXR_GRAPHIC_SXY(0), 0); + mixer_reg_write(ctx, MXR_GRAPHIC_SXY(1), 0); - spin_unlock_irqrestore(&res->reg_slock, flags); + spin_unlock_irqrestore(&ctx->reg_slock, flags); } static irqreturn_t mixer_irq_handler(int irq, void *arg) { struct mixer_context *ctx = arg; - struct mixer_resources *res = &ctx->mixer_res; u32 val, base, shadow; - spin_lock(&res->reg_slock); + spin_lock(&ctx->reg_slock); /* read interrupt status for handling and clearing flags for VSYNC */ - val = mixer_reg_read(res, MXR_INT_STATUS); + val = mixer_reg_read(ctx, MXR_INT_STATUS); /* handling VSYNC */ if (val & MXR_INT_STATUS_VSYNC) { @@ -754,13 +711,13 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) /* interlace scan need to check shadow register */ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { - base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); - shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0)); + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); if (base != shadow) goto out; - base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1)); - shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1)); + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); if (base != shadow) goto out; } @@ -770,9 +727,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) out: /* clear interrupts */ - mixer_reg_write(res, MXR_INT_STATUS, val); + mixer_reg_write(ctx, MXR_INT_STATUS, val); - spin_unlock(&res->reg_slock); + spin_unlock(&ctx->reg_slock); return IRQ_HANDLED; } @@ -780,26 +737,25 @@ out: static int mixer_resources_init(struct mixer_context *mixer_ctx) { struct device *dev = &mixer_ctx->pdev->dev; - struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; struct resource *res; int ret; - spin_lock_init(&mixer_res->reg_slock); + spin_lock_init(&mixer_ctx->reg_slock); - mixer_res->mixer = devm_clk_get(dev, "mixer"); - if (IS_ERR(mixer_res->mixer)) { + mixer_ctx->mixer = devm_clk_get(dev, "mixer"); + if (IS_ERR(mixer_ctx->mixer)) { dev_err(dev, "failed to get clock 'mixer'\n"); return -ENODEV; } - mixer_res->hdmi = devm_clk_get(dev, "hdmi"); - if (IS_ERR(mixer_res->hdmi)) { + mixer_ctx->hdmi = devm_clk_get(dev, "hdmi"); + if (IS_ERR(mixer_ctx->hdmi)) { dev_err(dev, "failed to get clock 'hdmi'\n"); - return PTR_ERR(mixer_res->hdmi); + return PTR_ERR(mixer_ctx->hdmi); } - mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); - if (IS_ERR(mixer_res->sclk_hdmi)) { + 
mixer_ctx->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); + if (IS_ERR(mixer_ctx->sclk_hdmi)) { dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); return -ENODEV; } @@ -809,9 +765,9 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx) return -ENXIO; } - mixer_res->mixer_regs = devm_ioremap(dev, res->start, + mixer_ctx->mixer_regs = devm_ioremap(dev, res->start, resource_size(res)); - if (mixer_res->mixer_regs == NULL) { + if (mixer_ctx->mixer_regs == NULL) { dev_err(dev, "register mapping failed.\n"); return -ENXIO; } @@ -828,7 +784,7 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx) dev_err(dev, "request interrupt failed.\n"); return ret; } - mixer_res->irq = res->start; + mixer_ctx->irq = res->start; return 0; } @@ -836,30 +792,29 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx) static int vp_resources_init(struct mixer_context *mixer_ctx) { struct device *dev = &mixer_ctx->pdev->dev; - struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; struct resource *res; - mixer_res->vp = devm_clk_get(dev, "vp"); - if (IS_ERR(mixer_res->vp)) { + mixer_ctx->vp = devm_clk_get(dev, "vp"); + if (IS_ERR(mixer_ctx->vp)) { dev_err(dev, "failed to get clock 'vp'\n"); return -ENODEV; } if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) { - mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); - if (IS_ERR(mixer_res->sclk_mixer)) { + mixer_ctx->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); + if (IS_ERR(mixer_ctx->sclk_mixer)) { dev_err(dev, "failed to get clock 'sclk_mixer'\n"); return -ENODEV; } - mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer"); - if (IS_ERR(mixer_res->mout_mixer)) { + mixer_ctx->mout_mixer = devm_clk_get(dev, "mout_mixer"); + if (IS_ERR(mixer_ctx->mout_mixer)) { dev_err(dev, "failed to get clock 'mout_mixer'\n"); return -ENODEV; } - if (mixer_res->sclk_hdmi && mixer_res->mout_mixer) - clk_set_parent(mixer_res->mout_mixer, - mixer_res->sclk_hdmi); + if (mixer_ctx->sclk_hdmi && mixer_ctx->mout_mixer) + clk_set_parent(mixer_ctx->mout_mixer, + mixer_ctx->sclk_hdmi); } res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1); @@ -868,9 +823,9 @@ static int vp_resources_init(struct mixer_context *mixer_ctx) return -ENXIO; } - mixer_res->vp_regs = devm_ioremap(dev, res->start, + mixer_ctx->vp_regs = devm_ioremap(dev, res->start, resource_size(res)); - if (mixer_res->vp_regs == NULL) { + if (mixer_ctx->vp_regs == NULL) { dev_err(dev, "register mapping failed.\n"); return -ENXIO; } @@ -914,15 +869,14 @@ static void mixer_ctx_remove(struct mixer_context *mixer_ctx) static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) { struct mixer_context *mixer_ctx = crtc->ctx; - struct mixer_resources *res = &mixer_ctx->mixer_res; __set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags); if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return 0; /* enable vsync interrupt */ - mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); - mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); + mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); + mixer_reg_writemask(mixer_ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); return 0; } @@ -930,7 +884,6 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) { struct mixer_context *mixer_ctx = crtc->ctx; - struct mixer_resources *res = &mixer_ctx->mixer_res; __clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags); @@ -938,8 +891,8 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) return; /* disable 
vsync interrupt */ - mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); - mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); + mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); + mixer_reg_writemask(mixer_ctx, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); } static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) @@ -972,7 +925,6 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { struct mixer_context *mixer_ctx = crtc->ctx; - struct mixer_resources *res = &mixer_ctx->mixer_res; unsigned long flags; DRM_DEBUG_KMS("win: %d\n", plane->index); @@ -980,9 +932,9 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc, if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - spin_lock_irqsave(&res->reg_slock, flags); + spin_lock_irqsave(&mixer_ctx->reg_slock, flags); mixer_cfg_layer(mixer_ctx, plane->index, 0, false); - spin_unlock_irqrestore(&res->reg_slock, flags); + spin_unlock_irqrestore(&mixer_ctx->reg_slock, flags); } static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) @@ -999,7 +951,6 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) static void mixer_enable(struct exynos_drm_crtc *crtc) { struct mixer_context *ctx = crtc->ctx; - struct mixer_resources *res = &ctx->mixer_res; if (test_bit(MXR_BIT_POWERED, &ctx->flags)) return; @@ -1010,14 +961,17 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_vsync_set_update(ctx, false); - mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) { - mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); - mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); + mixer_reg_writemask(ctx, MXR_INT_STATUS, ~0, + MXR_INT_CLEAR_VSYNC); + mixer_reg_writemask(ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); } mixer_win_reset(ctx); + mixer_commit(ctx); + mixer_vsync_set_update(ctx, true); set_bit(MXR_BIT_POWERED, &ctx->flags); @@ -1044,26 +998,75 @@ static void mixer_disable(struct exynos_drm_crtc *crtc) clear_bit(MXR_BIT_POWERED, &ctx->flags); } -/* Only valid for Mixer version 16.0.33.0 */ -static int mixer_atomic_check(struct exynos_drm_crtc *crtc, - struct drm_crtc_state *state) +static int mixer_mode_valid(struct exynos_drm_crtc *crtc, + const struct drm_display_mode *mode) { - struct drm_display_mode *mode = &state->adjusted_mode; - u32 w, h; + struct mixer_context *ctx = crtc->ctx; + u32 w = mode->hdisplay, h = mode->vdisplay; - w = mode->hdisplay; - h = mode->vdisplay; + DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n", w, h, + mode->vrefresh, !!(mode->flags & DRM_MODE_FLAG_INTERLACE)); - DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n", - mode->hdisplay, mode->vdisplay, mode->vrefresh, - (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 
1 : 0); + if (ctx->mxr_ver == MXR_VER_128_0_0_184) + return MODE_OK; if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) || - (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) || - (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080)) - return 0; + (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) || + (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080)) + return MODE_OK; + + if ((w == 1024 && h == 768) || + (w == 1366 && h == 768) || + (w == 1280 && h == 1024)) + return MODE_OK; + + return MODE_BAD; +} + +static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mixer_context *ctx = crtc->ctx; + int width = mode->hdisplay, height = mode->vdisplay, i; + + struct { + int hdisplay, vdisplay, htotal, vtotal, scan_val; + } static const modes[] = { + { 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD }, + { 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD }, + { 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD }, + { 1920, 1080, 2200, 1125, MXR_CFG_SCAN_HD_1080 | + MXR_CFG_SCAN_HD } + }; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + __set_bit(MXR_BIT_INTERLACE, &ctx->flags); + else + __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); + + if (ctx->mxr_ver == MXR_VER_128_0_0_184) + return true; + + for (i = 0; i < ARRAY_SIZE(modes); ++i) + if (width <= modes[i].hdisplay && height <= modes[i].vdisplay) { + ctx->scan_value = modes[i].scan_val; + if (width < modes[i].hdisplay || + height < modes[i].vdisplay) { + adjusted_mode->hdisplay = modes[i].hdisplay; + adjusted_mode->hsync_start = modes[i].hdisplay; + adjusted_mode->hsync_end = modes[i].htotal; + adjusted_mode->htotal = modes[i].htotal; + adjusted_mode->vdisplay = modes[i].vdisplay; + adjusted_mode->vsync_start = modes[i].vdisplay; + adjusted_mode->vsync_end = modes[i].vtotal; + adjusted_mode->vtotal = modes[i].vtotal; + } + + return true; + } - return -EINVAL; + return false; } static const struct exynos_drm_crtc_ops mixer_crtc_ops = { @@ -1075,7 +1078,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = { .update_plane = mixer_update_plane, .disable_plane = mixer_disable_plane, .atomic_flush = mixer_atomic_flush, - .atomic_check = mixer_atomic_check, + .mode_valid = mixer_mode_valid, + .mode_fixup = mixer_mode_fixup, }; static const struct mixer_drv_data exynos5420_mxr_drv_data = { @@ -1217,14 +1221,13 @@ static int mixer_remove(struct platform_device *pdev) static int __maybe_unused exynos_mixer_suspend(struct device *dev) { struct mixer_context *ctx = dev_get_drvdata(dev); - struct mixer_resources *res = &ctx->mixer_res; - clk_disable_unprepare(res->hdmi); - clk_disable_unprepare(res->mixer); + clk_disable_unprepare(ctx->hdmi); + clk_disable_unprepare(ctx->mixer); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { - clk_disable_unprepare(res->vp); + clk_disable_unprepare(ctx->vp); if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) - clk_disable_unprepare(res->sclk_mixer); + clk_disable_unprepare(ctx->sclk_mixer); } return 0; @@ -1233,28 +1236,27 @@ static int __maybe_unused exynos_mixer_suspend(struct device *dev) static int __maybe_unused exynos_mixer_resume(struct device *dev) { struct mixer_context *ctx = dev_get_drvdata(dev); - struct mixer_resources *res = &ctx->mixer_res; int ret; - ret = clk_prepare_enable(res->mixer); + ret = clk_prepare_enable(ctx->mixer); if (ret < 0) { DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret); return ret; } - ret = clk_prepare_enable(res->hdmi); + ret = clk_prepare_enable(ctx->hdmi); 
if (ret < 0) { DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); return ret; } if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { - ret = clk_prepare_enable(res->vp); + ret = clk_prepare_enable(ctx->vp); if (ret < 0) { DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n", ret); return ret; } if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) { - ret = clk_prepare_enable(res->sclk_mixer); + ret = clk_prepare_enable(ctx->sclk_mixer); if (ret < 0) { DRM_ERROR("Failed to prepare_enable the " \ "sclk_mixer clk [%d]\n", diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h index a0507dc18d9e..04be0f7e8193 100644 --- a/drivers/gpu/drm/exynos/regs-hdmi.h +++ b/drivers/gpu/drm/exynos/regs-hdmi.h @@ -419,11 +419,9 @@ #define HDMI_I2S_DSD_CON HDMI_I2S_BASE(0x01c) #define HDMI_I2S_MUX_CON HDMI_I2S_BASE(0x020) #define HDMI_I2S_CH_ST_CON HDMI_I2S_BASE(0x024) -#define HDMI_I2S_CH_ST_0 HDMI_I2S_BASE(0x028) -#define HDMI_I2S_CH_ST_1 HDMI_I2S_BASE(0x02c) -#define HDMI_I2S_CH_ST_2 HDMI_I2S_BASE(0x030) -#define HDMI_I2S_CH_ST_3 HDMI_I2S_BASE(0x034) -#define HDMI_I2S_CH_ST_4 HDMI_I2S_BASE(0x038) +/* n must be within range 0...(HDMI_I2S_CH_ST_MAXNUM - 1) */ +#define HDMI_I2S_CH_ST_MAXNUM 5 +#define HDMI_I2S_CH_ST(n) HDMI_I2S_BASE(0x028 + 4 * (n)) #define HDMI_I2S_CH_ST_SH_0 HDMI_I2S_BASE(0x03c) #define HDMI_I2S_CH_ST_SH_1 HDMI_I2S_BASE(0x040) #define HDMI_I2S_CH_ST_SH_2 HDMI_I2S_BASE(0x044) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 58e9e0601a61..faf17b83b910 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) return PTR_ERR(fsl_dev->state); } - clk_disable_unprepare(fsl_dev->pix_clk); clk_disable_unprepare(fsl_dev->clk); return 0; @@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); + enable_irq(fsl_dev->irq); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); console_lock(); @@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) console_unlock(); drm_kms_helper_poll_enable(fsl_dev->drm); - enable_irq(fsl_dev->irq); return 0; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index edd7d8127d19..c54806d08dd7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, { struct drm_encoder *encoder = &fsl_dev->encoder; struct drm_connector *connector = &fsl_dev->connector.base; - struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config; int ret; fsl_dev->connector.encoder = encoder; @@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, if (ret < 0) goto err_sysfs; - drm_object_property_set_value(&connector->base, - mode_config->dpms_property, - DRM_MODE_DPMS_OFF); - ret = drm_panel_attach(panel, connector); if (ret) { dev_err(fsl_dev->dev, "failed to attach panel\n"); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 026cb52ece0b..94b23fcbc989 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4603,11 +4603,17 @@ static void __i915_gem_free_work(struct work_struct *work) * unbound now. 
*/ + spin_lock(&i915->mm.free_lock); while ((freed = llist_del_all(&i915->mm.free_list))) { + spin_unlock(&i915->mm.free_lock); + __i915_gem_free_objects(i915, freed); if (need_resched()) - break; + return; + + spin_lock(&i915->mm.free_lock); } + spin_unlock(&i915->mm.free_lock); } static void __i915_gem_free_object_rcu(struct rcu_head *head) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 5bf96a258509..e304dcbc6042 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -106,14 +106,9 @@ static void lut_close(struct i915_gem_context *ctx) radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); - struct drm_i915_gem_object *obj = vma->obj; radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); - - if (!i915_vma_is_ggtt(vma)) - i915_vma_close(vma); - - __i915_gem_object_release_unless_active(obj); + __i915_gem_object_release_unless_active(vma->obj); } } @@ -198,6 +193,11 @@ static void context_close(struct i915_gem_context *ctx) { i915_gem_context_set_closed(ctx); + /* + * The LUT uses the VMA as a backpointer to unref the object, + * so we need to clear the LUT before we close all the VMA (inside + * the ppgtt). + */ lut_close(ctx); if (ctx->ppgtt) i915_ppgtt_close(&ctx->ppgtt->base); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 527a2d2d6281..5eaa6893daaa 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1341,7 +1341,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, if (IS_ERR(pt)) goto unwind; - if (count < GEN8_PTES) + if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) gen8_initialize_pt(vm, pt); gen8_ppgtt_set_pde(vm, pd, pt, pde); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index eb31f8aa5c21..3770e3323fc8 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -162,6 +162,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, if (!shrinker_lock(dev_priv, &unlock)) return 0; + /* + * When shrinking the active list, also consider active contexts. + * Active contexts are pinned until they are retired, and so can + * not be simply unbound to retire and unpin their pages. To shrink + * the contexts, we must wait until the gpu is idle. + * + * We don't care about errors here; if we cannot wait upon the GPU, + * we will free as much as we can and hope to get a second chance. 
+ */ + if (flags & I915_SHRINK_ACTIVE) + i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED); + trace_i915_gem_shrink(dev_priv, target, flags); i915_gem_retire_requests(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index a2e8114b739d..f84c267728fd 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -610,6 +610,7 @@ done: execlists->first = rb; if (submit) { port_assign(port, last); + execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); i915_guc_submit(engine); } spin_unlock_irq(&engine->timeline->lock); @@ -633,6 +634,8 @@ static void i915_guc_irq_handler(unsigned long data) rq = port_request(&port[0]); } + if (!rq) + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); if (!port_isset(last_port)) i915_guc_dequeue(engine); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b1296a55c1e4..f8205841868b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1388,8 +1388,10 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) bool tasklet = false; if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { - __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - tasklet = true; + if (READ_ONCE(engine->execlists.active)) { + __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + tasklet = true; + } } if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index fc77e8191eb5..fbfab2f33023 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -54,6 +54,13 @@ i915_vma_retire(struct i915_gem_active *active, if (--obj->active_count) return; + /* Prune the shared fence arrays iff completely idle (inc. external) */ + if (reservation_object_trylock(obj->resv)) { + if (reservation_object_test_signaled_rcu(obj->resv, true)) + reservation_object_add_excl_fence(obj->resv, NULL); + reservation_object_unlock(obj->resv); + } + /* Bump our place on the bound list to keep it roughly in LRU order * so that we don't steal from recently used but inactive objects * (unless we are forced to ofc!) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e2ac976844d8..f4a9a182868f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3676,6 +3676,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev); + intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) @@ -14350,8 +14351,6 @@ void intel_modeset_init_hw(struct drm_device *dev) intel_update_cdclk(dev_priv); dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; - - intel_init_clock_gating(dev_priv); } /* @@ -15063,6 +15062,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; int i; + if (IS_HASWELL(dev_priv)) { + /* + * WaRsPkgCStateDisplayPMReq:hsw + * System hang if this isn't done before disabling all planes! + */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); + } + intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. 
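A quick orientation note on intel_init_clock_gating(), since its call sites move around in the intel_display.c hunks of this series. Only the call sites visible in these hunks are listed; there may be others elsewhere in the tree:

	/*
	 * Removed: intel_modeset_init_hw()   (no longer calls it)
	 * Added:   intel_finish_reset()      (right after intel_modeset_init_hw())
	 * Added:   intel_modeset_gem_init()  (driver load path, see the hunk below)
	 */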
*/ @@ -15160,6 +15168,8 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev_priv); + intel_init_clock_gating(dev_priv); + intel_setup_overlay(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index a47a9c6bea52..ab5bf4e2e28e 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1548,8 +1548,8 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) return false; - /* Both ports drained, no more ELSP submission? */ - if (port_request(&engine->execlists.port[0])) + /* Waiting to drain ELSP? */ + if (READ_ONCE(engine->execlists.active)) return false; /* ELSP is empty, but there are ready requests? */ @@ -1749,6 +1749,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m) idx); } } + drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active); rcu_read_unlock(); } else if (INTEL_GEN(dev_priv) > 6) { drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7f45dd7dc3e5..d36e25607435 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -575,7 +575,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * the state of the GPU is known (idle). */ inject_preempt_context(engine); - execlists->preempt = true; + execlists_set_active(execlists, + EXECLISTS_ACTIVE_PREEMPT); goto unlock; } else { /* @@ -683,8 +684,10 @@ done: unlock: spin_unlock_irq(&engine->timeline->lock); - if (submit) + if (submit) { + execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); execlists_submit_ports(engine); + } } static void @@ -696,6 +699,7 @@ execlist_cancel_port_requests(struct intel_engine_execlists *execlists) while (num_ports-- && port_isset(port)) { struct drm_i915_gem_request *rq = port_request(port); + GEM_BUG_ON(!execlists->active); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED); i915_gem_request_put(rq); @@ -730,7 +734,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { INIT_LIST_HEAD(&rq->priotree.link); - rq->priotree.priority = INT_MAX; dma_fence_set_error(&rq->fence, -EIO); __i915_gem_request_submit(rq); @@ -861,15 +864,21 @@ static void intel_lrc_irq_handler(unsigned long data) unwind_incomplete_requests(engine); spin_unlock_irq(&engine->timeline->lock); - GEM_BUG_ON(!execlists->preempt); - execlists->preempt = false; + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)); + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_PREEMPT); continue; } if (status & GEN8_CTX_STATUS_PREEMPTED && - execlists->preempt) + execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)) continue; + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); + /* Check the context/desc id for this event matches */ GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); @@ -881,7 +890,6 @@ static void intel_lrc_irq_handler(unsigned long data) execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); trace_i915_gem_request_out(rq); - rq->priotree.priority = INT_MAX; i915_gem_request_put(rq); execlists_port_complete(execlists, port); @@ -892,6 +900,9 @@ static void intel_lrc_irq_handler(unsigned long data) /* After the final element, the hw should be idle */ GEM_BUG_ON(port_count(port) == 0 && !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); + if 
(port_count(port) == 0) + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_USER); } if (head != execlists->csb_head) { @@ -901,7 +912,7 @@ static void intel_lrc_irq_handler(unsigned long data) } } - if (!execlists->preempt) + if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) execlists_dequeue(engine); intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); @@ -1460,7 +1471,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); execlists->csb_head = -1; - execlists->preempt = false; + execlists->active = 0; /* After a GPU reset, we may have requests to replay */ if (!i915_modparams.enable_guc_submission && execlists->first) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c42a65a93b3a..aa12a44e9a76 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3133,7 +3133,11 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, struct intel_crtc_state *newstate) { struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; - struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; + struct intel_atomic_state *intel_state = + to_intel_atomic_state(newstate->base.state); + const struct intel_crtc_state *oldstate = + intel_atomic_get_old_crtc_state(intel_state, intel_crtc); + const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; int level, max_level = ilk_wm_max_level(to_i915(dev)); /* @@ -3142,6 +3146,9 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * and after the vblank. */ *a = newstate->wm.ilk.optimal; + if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base)) + return 0; + a->pipe_enabled |= b->pipe_enabled; a->sprites_enabled |= b->sprites_enabled; a->sprites_scaled |= b->sprites_scaled; @@ -5755,12 +5762,30 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->wm.wm_mutex); } +/* + * FIXME should probably kill this and improve + * the real watermark readout/sanitation instead + */ +static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) +{ + I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + + /* + * Don't touch WM1S_LP_EN here. + * Doing so could cause underruns. + */ +} + void ilk_wm_get_hw_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct drm_crtc *crtc; + ilk_init_lp_watermarks(dev_priv); + for_each_crtc(dev, crtc) ilk_pipe_wm_get_hw_state(crtc); @@ -8207,18 +8232,6 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) } } -static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) -{ - I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); - - /* - * Don't touch WM1S_LP_EN here. - * Doing so could cause underruns. 
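On the ilk_compute_intermediate_wm() hunk above: instead of reading the previous watermarks from the mutable intel_crtc->wm.active.ilk, the old values are now taken from the old CRTC state tracked by the atomic commit. A minimal sketch of that access pattern; to my reading the intel_* accessor is a typed wrapper around the DRM core helper, shown second for comparison:

	/* i915 wrappers, as used in the hunk: */
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(newstate->base.state);
	const struct intel_crtc_state *oldstate =
		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);

	/* Equivalent access through the DRM core helper: */
	const struct drm_crtc_state *old_cstate =
		drm_atomic_get_old_crtc_state(newstate->base.state,
					      &intel_crtc->base);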
- */ -} - static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; @@ -8252,8 +8265,6 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); - ilk_init_lp_watermarks(dev_priv); - /* * Based on the document from hardware guys the following bits * should be set unconditionally in order to enable FBC. @@ -8366,8 +8377,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_GT_MODE, _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); @@ -8594,8 +8603,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) I915_GTT_PAGE_SIZE_2M); enum pipe pipe; - ilk_init_lp_watermarks(dev_priv); - /* WaSwitchSolVfFArbitrationPriority:bdw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -8646,8 +8653,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { - ilk_init_lp_watermarks(dev_priv); - /* L3 caching of data atomics doesn't work -- disable it. */ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); I915_WRITE(HSW_ROW_CHICKEN3, @@ -8691,10 +8696,6 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); - /* WaRsPkgCStateDisplayPMReq:hsw */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); - lpt_init_clock_gating(dev_priv); } @@ -8702,8 +8703,6 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t snpcr; - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); /* WaDisableEarlyCull:ivb */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 17186f067408..6a42ed618a28 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -241,9 +241,17 @@ struct intel_engine_execlists { } port[EXECLIST_MAX_PORTS]; /** - * @preempt: are we currently handling a preempting context switch? + * @active: is the HW active? We consider the HW as active after + * submitting any context for execution and until we have seen the + * last context completion event. After that, we do not expect any + * more events until we submit, and so can park the HW. + * + * As we have a small number of different sources from which we feed + * the HW, we track the state of each inside a single bitfield. 
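To summarise how the new @active bitfield (declared just below) is used across this series, here is a condensed lifecycle sketch. The call sites are paraphrased from the intel_lrc.c and i915_guc_submission.c hunks; "preempt_done" is a stand-in for the CSB "preempted and idle" status check, not a real symbol:

	/* When user contexts are submitted (execlists or GuC path): */
	execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);

	/* When the preempt context is injected: */
	execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);

	/* In the context-switch interrupt handler: */
	if (preempt_done)		/* stand-in for the CSB status check */
		execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
	if (port_count(port) == 0)	/* last user request drained */
		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);

	/* Engine idleness then reduces to reading the whole field: */
	if (READ_ONCE(engine->execlists.active))
		return false;		/* not idle yet */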
*/ - bool preempt; + unsigned int active; +#define EXECLISTS_ACTIVE_USER 0 +#define EXECLISTS_ACTIVE_PREEMPT 1 /** * @port_mask: number of execlist ports - 1 @@ -525,6 +533,27 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; +static inline void +execlists_set_active(struct intel_engine_execlists *execlists, + unsigned int bit) +{ + __set_bit(bit, (unsigned long *)&execlists->active); +} + +static inline void +execlists_clear_active(struct intel_engine_execlists *execlists, + unsigned int bit) +{ + __clear_bit(bit, (unsigned long *)&execlists->active); +} + +static inline bool +execlists_is_active(const struct intel_engine_execlists *execlists, + unsigned int bit) +{ + return test_bit(bit, (unsigned long *)&execlists->active); +} + static inline unsigned int execlists_num_ports(const struct intel_engine_execlists * const execlists) { @@ -538,6 +567,7 @@ execlists_port_complete(struct intel_engine_execlists * const execlists, const unsigned int m = execlists->port_mask; GEM_BUG_ON(port_index(port, execlists) != 0); + GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); memmove(port, port + 1, m * sizeof(struct execlist_port)); memset(port + m, 0, sizeof(struct execlist_port)); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 53e0b24beda6..9a9961802f5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -115,7 +115,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc) if (crtc->state) { if (crtc->state->mode_blob) - drm_property_unreference_blob(crtc->state->mode_blob); + drm_property_blob_put(crtc->state->mode_blob); state = to_imx_crtc_state(crtc->state); memset(state, 0, sizeof(*state)); diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 8def97d75030..aedecda9728a 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -183,7 +183,7 @@ static int imx_pd_register(struct drm_device *drm, &imx_pd_connector_helper_funcs); drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + DRM_MODE_CONNECTOR_DPI); } if (imxpd->panel) diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 74fc9362ecf9..c0fb52c6d4ca 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -219,7 +219,7 @@ int qxl_garbage_collect(struct qxl_device *qdev) union qxl_release_info *info; while (qxl_ring_pop(qdev->release_ring, &id)) { - QXL_INFO(qdev, "popped %lld\n", id); + DRM_DEBUG_DRIVER("popped %lld\n", id); while (id) { release = qxl_release_from_id_locked(qdev, id); if (release == NULL) @@ -229,8 +229,8 @@ int qxl_garbage_collect(struct qxl_device *qdev) next_id = info->next; qxl_release_unmap(qdev, release, info); - QXL_INFO(qdev, "popped %lld, next %lld\n", id, - next_id); + DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id, + next_id); switch (release->type) { case QXL_RELEASE_DRAWABLE: @@ -248,7 +248,7 @@ int qxl_garbage_collect(struct qxl_device *qdev) } } - QXL_INFO(qdev, "%s: %d\n", __func__, i); + DRM_DEBUG_DRIVER("%d\n", i); return i; } @@ -381,17 +381,19 @@ void qxl_io_create_primary(struct qxl_device *qdev, { struct qxl_surface_create *create; - QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev, - qdev->ram_header); + DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header); create = &qdev->ram_header->create_surface; create->format = bo->surf.format; create->width = bo->surf.width; 
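	/*
	 * Editorial aside, not part of the patch: the hunk just below points
	 * create->mem at the shadow BO when one exists.  A hypothetical helper
	 * (names invented here, not in the driver) expressing that selection:
	 *
	 *	static uint64_t qxl_primary_paddr(struct qxl_device *qdev,
	 *					  struct qxl_bo *bo,
	 *					  unsigned long offset)
	 *	{
	 *		struct qxl_bo *target = bo->shadow ? bo->shadow : bo;
	 *
	 *		return qxl_bo_physical_address(qdev, target, offset);
	 *	}
	 */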
create->height = bo->surf.height; create->stride = bo->surf.stride; - create->mem = qxl_bo_physical_address(qdev, bo, offset); + if (bo->shadow) { + create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset); + } else { + create->mem = qxl_bo_physical_address(qdev, bo, offset); + } - QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem, - bo->kptr); + DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr); create->flags = QXL_SURF_FLAG_KEEP_DATA; create->type = QXL_SURF_TYPE_PRIMARY; @@ -401,7 +403,7 @@ void qxl_io_create_primary(struct qxl_device *qdev, void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id) { - QXL_INFO(qdev, "qxl_memslot_add %d\n", id); + DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id); wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC); } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index afbf50d0c08f..4756b3c9bf2c 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -305,7 +305,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = { void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); + struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj); + WARN_ON(bo->shadow); drm_gem_object_unreference_unlocked(qxl_fb->obj); drm_framebuffer_cleanup(fb); kfree(qxl_fb); @@ -508,6 +510,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .x2 = qfb->base.width, .y2 = qfb->base.height }; + bool same_shadow = false; if (old_state->fb) { qfb_old = to_qxl_framebuffer(old_state->fb); @@ -519,15 +522,23 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, if (bo == bo_old) return; + if (bo_old && bo_old->shadow && bo->shadow && + bo_old->shadow == bo->shadow) { + same_shadow = true; + } + if (bo_old && bo_old->is_primary) { - qxl_io_destroy_primary(qdev); + if (!same_shadow) + qxl_io_destroy_primary(qdev); bo_old->is_primary = false; } if (!bo->is_primary) { - qxl_io_create_primary(qdev, 0, bo); + if (!same_shadow) + qxl_io_create_primary(qdev, 0, bo); bo->is_primary = true; } + qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); } @@ -679,8 +690,9 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, static int qxl_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { + struct qxl_device *qdev = plane->dev->dev_private; struct drm_gem_object *obj; - struct qxl_bo *user_bo; + struct qxl_bo *user_bo, *old_bo = NULL; int ret; if (!new_state->fb) @@ -689,6 +701,32 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(new_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + user_bo->is_dumb && !user_bo->shadow) { + if (plane->state->fb) { + obj = to_qxl_framebuffer(plane->state->fb)->obj; + old_bo = gem_to_qxl_bo(obj); + } + if (old_bo && old_bo->shadow && + user_bo->gem_base.size == old_bo->gem_base.size && + plane->state->crtc == new_state->crtc && + plane->state->crtc_w == new_state->crtc_w && + plane->state->crtc_h == new_state->crtc_h && + plane->state->src_x == new_state->src_x && + plane->state->src_y == new_state->src_y && + plane->state->src_w == new_state->src_w && + plane->state->src_h == new_state->src_h && + plane->state->rotation == new_state->rotation && + plane->state->zpos == new_state->zpos) { + drm_gem_object_get(&old_bo->shadow->gem_base); + user_bo->shadow = old_bo->shadow; + } else { + qxl_bo_create(qdev, user_bo->gem_base.size, + true, true, QXL_GEM_DOMAIN_VRAM, 
NULL, + &user_bo->shadow); + } + } + ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); if (ret) return ret; @@ -713,6 +751,11 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(old_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); qxl_bo_unpin(user_bo); + + if (user_bo->shadow && !user_bo->is_primary) { + drm_gem_object_put_unlocked(&user_bo->shadow->gem_base); + user_bo->shadow = NULL; + } } static const uint32_t qxl_cursor_plane_formats[] = { diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3397a1907336..08752c0ffb35 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -62,33 +62,9 @@ #define QXL_DEBUGFS_MAX_COMPONENTS 32 -extern int qxl_log_level; extern int qxl_num_crtc; extern int qxl_max_ioctls; -enum { - QXL_INFO_LEVEL = 1, - QXL_DEBUG_LEVEL = 2, -}; - -#define QXL_INFO(qdev, fmt, ...) do { \ - if (qxl_log_level >= QXL_INFO_LEVEL) { \ - qxl_io_log(qdev, fmt, __VA_ARGS__); \ - } \ - } while (0) -#define QXL_DEBUG(qdev, fmt, ...) do { \ - if (qxl_log_level >= QXL_DEBUG_LEVEL) { \ - qxl_io_log(qdev, fmt, __VA_ARGS__); \ - } \ - } while (0) -#define QXL_INFO_ONCE(qdev, fmt, ...) do { \ - static int done; \ - if (!done) { \ - done = 1; \ - QXL_INFO(qdev, fmt, __VA_ARGS__); \ - } \ - } while (0) - #define DRM_FILE_OFFSET 0x100000000ULL #define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT) @@ -113,6 +89,8 @@ struct qxl_bo { /* Constant after initialization */ struct drm_gem_object gem_base; bool is_primary; /* is this now a primary surface */ + bool is_dumb; + struct qxl_bo *shadow; bool hw_surf_alloc; struct qxl_surface surf; uint32_t surface_id; @@ -351,7 +329,7 @@ int qxl_check_idle(struct qxl_ring *ring); static inline void * qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical) { - QXL_INFO(qdev, "not implemented (%lu)\n", physical); + DRM_DEBUG_DRIVER("not implemented (%lu)\n", physical); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index 5e65d5d2d937..11085ab01374 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -63,6 +63,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, &handle); if (r) return r; + qobj->is_dumb = true; args->pitch = pitch; args->handle = handle; return 0; diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 844c4a31ca13..23af3e352673 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -240,18 +240,15 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, return ret; qbo = gem_to_qxl_bo(gobj); - QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width, - mode_cmd.height, mode_cmd.pitches[0]); + DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width, + mode_cmd.height, mode_cmd.pitches[0]); shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height); /* TODO: what's the usual response to memory allocation errors? 
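Stepping back from the qxl_display.c hunks above: the shadow BO introduced for dumb primaries is reference counted across the plane prepare/cleanup hooks. A condensed view of the pairing, paraphrased from those hunks rather than copied verbatim:

	/* qxl_plane_prepare_fb(): either share the previous shadow ... */
	drm_gem_object_get(&old_bo->shadow->gem_base);
	user_bo->shadow = old_bo->shadow;

	/* ... or create a fresh one for this dumb BO. */
	qxl_bo_create(qdev, user_bo->gem_base.size, true, true,
		      QXL_GEM_DOMAIN_VRAM, NULL, &user_bo->shadow);

	/* qxl_plane_cleanup_fb(): drop it once it is no longer the primary. */
	if (user_bo->shadow && !user_bo->is_primary) {
		drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
		user_bo->shadow = NULL;
	}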
*/ BUG_ON(!shadow); - QXL_INFO(qdev, - "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", - qxl_bo_gpu_offset(qbo), - qxl_bo_mmap_offset(qbo), - qbo->kptr, - shadow); + DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", + qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo), + qbo->kptr, shadow); size = mode_cmd.pitches[0] * mode_cmd.height; info = drm_fb_helper_alloc_fbi(&qfbdev->helper); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index e6ec845b5be0..a6da6fa6ad58 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -154,7 +154,7 @@ qxl_release_alloc(struct qxl_device *qdev, int type, return handle; } *ret = release; - QXL_INFO(qdev, "allocated release %d\n", handle); + DRM_DEBUG_DRIVER("allocated release %d\n", handle); release->id = handle; return handle; } @@ -179,8 +179,7 @@ void qxl_release_free(struct qxl_device *qdev, struct qxl_release *release) { - QXL_INFO(qdev, "release %d, type %d\n", release->id, - release->type); + DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type); if (release->surface_release_id) qxl_surface_id_dealloc(qdev, release->surface_release_id); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7ecf8a4b9fe6..ab4823875311 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -136,8 +136,8 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma) "filp->private_data->minor->dev->dev_private == NULL\n"); return -EINVAL; } - QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n", - __func__, filp->private_data, vma->vm_pgoff); + DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n", + filp->private_data, vma->vm_pgoff); r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev); if (unlikely(r != 0)) diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 4d3f6ad0abdd..93b7102dd008 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -72,7 +72,7 @@ struct rockchip_dp_device { struct reset_control *rst; struct work_struct psr_work; - spinlock_t psr_lock; + struct mutex psr_lock; unsigned int psr_state; const struct rockchip_dp_chip_data *data; @@ -83,21 +83,20 @@ struct rockchip_dp_device { static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled) { struct rockchip_dp_device *dp = to_dp(encoder); - unsigned long flags; if (!analogix_dp_psr_supported(dp->dev)) return; DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? 
"Entry" : "Exit"); - spin_lock_irqsave(&dp->psr_lock, flags); + mutex_lock(&dp->psr_lock); if (enabled) dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE; else dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE; schedule_work(&dp->psr_work); - spin_unlock_irqrestore(&dp->psr_lock, flags); + mutex_unlock(&dp->psr_lock); } static void analogix_dp_psr_work(struct work_struct *work) @@ -105,7 +104,6 @@ static void analogix_dp_psr_work(struct work_struct *work) struct rockchip_dp_device *dp = container_of(work, typeof(*dp), psr_work); int ret; - unsigned long flags; ret = rockchip_drm_wait_vact_end(dp->encoder.crtc, PSR_WAIT_LINE_FLAG_TIMEOUT_MS); @@ -114,12 +112,12 @@ static void analogix_dp_psr_work(struct work_struct *work) return; } - spin_lock_irqsave(&dp->psr_lock, flags); + mutex_lock(&dp->psr_lock); if (dp->psr_state == EDP_VSC_PSR_STATE_ACTIVE) analogix_dp_enable_psr(dp->dev); else analogix_dp_disable_psr(dp->dev); - spin_unlock_irqrestore(&dp->psr_lock, flags); + mutex_unlock(&dp->psr_lock); } static int rockchip_dp_pre_init(struct rockchip_dp_device *dp) @@ -381,7 +379,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, dp->plat_data.power_off = rockchip_dp_powerdown; dp->plat_data.get_modes = rockchip_dp_get_modes; - spin_lock_init(&dp->psr_lock); + mutex_init(&dp->psr_lock); dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE; INIT_WORK(&dp->psr_work, analogix_dp_psr_work); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 4bcacd3f4861..b0a1dedac802 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -174,9 +174,9 @@ struct tegra_sor { struct reset_control *rst; struct clk *clk_parent; - struct clk *clk_brick; struct clk *clk_safe; - struct clk *clk_src; + struct clk *clk_out; + struct clk *clk_pad; struct clk *clk_dp; struct clk *clk; @@ -255,7 +255,7 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent) clk_disable_unprepare(sor->clk); - err = clk_set_parent(sor->clk, parent); + err = clk_set_parent(sor->clk_out, parent); if (err < 0) return err; @@ -266,24 +266,24 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent) return 0; } -struct tegra_clk_sor_brick { +struct tegra_clk_sor_pad { struct clk_hw hw; struct tegra_sor *sor; }; -static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw) +static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw) { - return container_of(hw, struct tegra_clk_sor_brick, hw); + return container_of(hw, struct tegra_clk_sor_pad, hw); } -static const char * const tegra_clk_sor_brick_parents[] = { +static const char * const tegra_clk_sor_pad_parents[] = { "pll_d2_out0", "pll_dp" }; -static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index) +static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index) { - struct tegra_clk_sor_brick *brick = to_brick(hw); - struct tegra_sor *sor = brick->sor; + struct tegra_clk_sor_pad *pad = to_pad(hw); + struct tegra_sor *sor = pad->sor; u32 value; value = tegra_sor_readl(sor, SOR_CLK_CNTRL); @@ -304,10 +304,10 @@ static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index) return 0; } -static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw) +static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw) { - struct tegra_clk_sor_brick *brick = to_brick(hw); - struct tegra_sor *sor = brick->sor; + struct tegra_clk_sor_pad *pad = to_pad(hw); + struct tegra_sor *sor = pad->sor; u8 parent = U8_MAX; u32 value; @@ -328,33 +328,33 @@ static u8 
tegra_clk_sor_brick_get_parent(struct clk_hw *hw) return parent; } -static const struct clk_ops tegra_clk_sor_brick_ops = { - .set_parent = tegra_clk_sor_brick_set_parent, - .get_parent = tegra_clk_sor_brick_get_parent, +static const struct clk_ops tegra_clk_sor_pad_ops = { + .set_parent = tegra_clk_sor_pad_set_parent, + .get_parent = tegra_clk_sor_pad_get_parent, }; -static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor, - const char *name) +static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, + const char *name) { - struct tegra_clk_sor_brick *brick; + struct tegra_clk_sor_pad *pad; struct clk_init_data init; struct clk *clk; - brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL); - if (!brick) + pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL); + if (!pad) return ERR_PTR(-ENOMEM); - brick->sor = sor; + pad->sor = sor; init.name = name; init.flags = 0; - init.parent_names = tegra_clk_sor_brick_parents; - init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents); - init.ops = &tegra_clk_sor_brick_ops; + init.parent_names = tegra_clk_sor_pad_parents; + init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents); + init.ops = &tegra_clk_sor_pad_ops; - brick->hw.init = &init; + pad->hw.init = &init; - clk = devm_clk_register(sor->dev, &brick->hw); + clk = devm_clk_register(sor->dev, &pad->hw); return clk; } @@ -998,8 +998,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor) /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); - if (err < 0) + if (err < 0) { dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + return err; + } value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | @@ -2007,8 +2009,10 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); - if (err < 0) + if (err < 0) { dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + return; + } div = clk_get_rate(sor->clk) / 1000000 * 4; @@ -2111,13 +2115,17 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) tegra_sor_writel(sor, value, SOR_XBAR_CTRL); /* switch to parent clock */ - err = clk_set_parent(sor->clk_src, sor->clk_parent); - if (err < 0) - dev_err(sor->dev, "failed to set source clock: %d\n", err); - - err = tegra_sor_set_parent_clock(sor, sor->clk_src); - if (err < 0) + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { dev_err(sor->dev, "failed to set parent clock: %d\n", err); + return; + } + + err = tegra_sor_set_parent_clock(sor, sor->clk_pad); + if (err < 0) { + dev_err(sor->dev, "failed to set pad clock: %d\n", err); + return; + } value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe); @@ -2628,11 +2636,24 @@ static int tegra_sor_probe(struct platform_device *pdev) } if (sor->soc->supports_hdmi || sor->soc->supports_dp) { - sor->clk_src = devm_clk_get(&pdev->dev, "source"); - if (IS_ERR(sor->clk_src)) { - err = PTR_ERR(sor->clk_src); - dev_err(sor->dev, "failed to get source clock: %d\n", - err); + struct device_node *np = pdev->dev.of_node; + const char *name; + + /* + * For backwards compatibility with Tegra210 device trees, + * fall back to the old clock name "source" if the new "out" + * clock is not available. 
+ */ + if (of_property_match_string(np, "clock-names", "out") < 0) + name = "source"; + else + name = "out"; + + sor->clk_out = devm_clk_get(&pdev->dev, name); + if (IS_ERR(sor->clk_out)) { + err = PTR_ERR(sor->clk_out); + dev_err(sor->dev, "failed to get %s clock: %d\n", + name, err); goto remove; } } @@ -2658,16 +2679,60 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } + /* + * Starting with Tegra186, the BPMP provides an implementation for + * the pad output clock, so we have to look it up from device tree. + */ + sor->clk_pad = devm_clk_get(&pdev->dev, "pad"); + if (IS_ERR(sor->clk_pad)) { + if (sor->clk_pad != ERR_PTR(-ENOENT)) { + err = PTR_ERR(sor->clk_pad); + goto remove; + } + + /* + * If the pad output clock is not available, then we assume + * we're on Tegra210 or earlier and have to provide our own + * implementation. + */ + sor->clk_pad = NULL; + } + + /* + * The bootloader may have set up the SOR such that it's module clock + * is sourced by one of the display PLLs. However, that doesn't work + * without properly having set up other bits of the SOR. + */ + err = clk_set_parent(sor->clk_out, sor->clk_safe); + if (err < 0) { + dev_err(&pdev->dev, "failed to use safe clock: %d\n", err); + goto remove; + } + platform_set_drvdata(pdev, sor); pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick"); - pm_runtime_put(&pdev->dev); + /* + * On Tegra210 and earlier, provide our own implementation for the + * pad output clock. + */ + if (!sor->clk_pad) { + err = pm_runtime_get_sync(&pdev->dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to get runtime PM: %d\n", + err); + goto remove; + } + + sor->clk_pad = tegra_clk_sor_pad_register(sor, + "sor1_pad_clkout"); + pm_runtime_put(&pdev->dev); + } - if (IS_ERR(sor->clk_brick)) { - err = PTR_ERR(sor->clk_brick); - dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err); + if (IS_ERR(sor->clk_pad)) { + err = PTR_ERR(sor->clk_pad); + dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n", + err); goto remove; } |
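To close out the Tegra SOR changes: the probe path now resolves two clocks with graceful fallbacks, which the sketch below condenses. The clock names "out", "source" and "pad" are the ones used in the hunks above; the goto-based cleanup is simplified to direct returns here, so treat this as an outline rather than the actual probe code:

	struct device_node *np = pdev->dev.of_node;
	const char *name;

	/* New DTs name the module output clock "out"; older Tegra210 DTs
	 * called it "source". */
	if (of_property_match_string(np, "clock-names", "out") < 0)
		name = "source";
	else
		name = "out";

	sor->clk_out = devm_clk_get(&pdev->dev, name);
	if (IS_ERR(sor->clk_out))
		return PTR_ERR(sor->clk_out);

	/* The pad output clock is only provided by firmware (BPMP) on
	 * Tegra186 and later; on earlier chips fall back to registering
	 * our own implementation via tegra_clk_sor_pad_register(). */
	sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
	if (IS_ERR(sor->clk_pad)) {
		if (sor->clk_pad != ERR_PTR(-ENOENT))
			return PTR_ERR(sor->clk_pad);
		sor->clk_pad = NULL;
	}

	/* Undo whatever parent the bootloader may have left configured. */
	return clk_set_parent(sor->clk_out, sor->clk_safe);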