Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c               | 515
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  |  31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c          | 293
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c      |  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c       | 227
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c     | 144
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c      | 148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_sink.c          |   2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c        | 260
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c       |  75
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c     |  93
11 files changed, 1380 insertions, 424 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 18c775a950cc..4ef4dc63e221 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -22,6 +22,8 @@
* Authors: AMD
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
@@ -33,6 +35,7 @@
#include "resource.h"
+#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"
@@ -55,6 +58,14 @@
#include "dc_link_dp.h"
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#include "dsc.h"
+#endif
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+#include "vm_helper.h"
+#endif
+
#include "dce/dce_i2c.h"
#define DC_LOGGER \
@@ -169,9 +180,14 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- dc->links[dc->link_count] = link;
- link->dc = dc;
- ++dc->link_count;
+ if (dc->config.edp_not_connected &&
+ link->connector_signal == SIGNAL_TYPE_EDP) {
+ link_destroy(&link);
+ } else {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
}
}
@@ -257,7 +273,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ if (pipe->stream == stream && pipe->stream_res.tg) {
pipe->stream->adjust = *adjust;
dc->hwss.set_drr(&pipe,
1,
@@ -451,7 +467,7 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
pipes,
stream->output_color_space,
stream->csc_color_matrix.matrix,
- pipes->plane_res.hubp ? pipes->plane_res.hubp->opp_id : 0);
+ pipes->stream_res.opp->inst);
ret = true;
}
}
@@ -484,128 +500,6 @@ void dc_stream_set_static_screen_events(struct dc *dc,
dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link)
-{
-
- int i;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i] == link)
- break;
- }
-
- if (i >= dc->link_count)
- ASSERT_CRITICAL(false);
-
- dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
-}
-
-void dc_link_perform_link_training(struct dc *dc,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern)
-{
- int i;
-
- for (i = 0; i < dc->link_count; i++)
- dc_link_dp_perform_link_training(
- dc->links[i],
- link_setting,
- skip_video_pattern);
-}
-
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe;
- struct dc_stream_state *link_stream;
- struct dc_link_settings store_settings = *link_setting;
-
- link->preferred_link_setting = store_settings;
-
- /* Retrain with preferred link settings only relevant for
- * DP signal type
- */
- if (!dc_is_dp_signal(link->connector_signal))
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->link) {
- if (pipe->stream->link == link)
- break;
- }
- }
-
- /* Stream not found */
- if (i == MAX_PIPES)
- return;
-
- link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
-
- /* Cannot retrain link if backend is off */
- if (link_stream->dpms_off)
- return;
-
- if (link_stream)
- decide_link_settings(link_stream, &store_settings);
-
- if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
- (store_settings.link_rate != LINK_RATE_UNKNOWN))
- dp_retrain_link_dp_test(link, &store_settings, false);
-}
-
-void dc_link_enable_hpd(const struct dc_link *link)
-{
- dc_link_dp_enable_hpd(link);
-}
-
-void dc_link_disable_hpd(const struct dc_link *link)
-{
- dc_link_dp_disable_hpd(link);
-}
-
-
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- if (link != NULL)
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- p_link_settings,
- p_custom_pattern,
- cust_pattern_size);
-}
-
-uint32_t dc_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_setting)
-{
- uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
-
- link_bw_kbps *= 8; /* 8 bits per byte*/
- link_bw_kbps *= link_setting->lane_count;
-
- return link_bw_kbps;
-
-}
-
-const struct dc_link_settings *dc_link_get_link_cap(
- const struct dc_link *link)
-{
- if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
- return &link->preferred_link_setting;
- return &link->verified_link_cap;
-}
-
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -613,6 +507,11 @@ static void destruct(struct dc *dc)
destroy_links(dc);
+ if (dc->clk_mgr) {
+ dc_destroy_clk_mgr(dc->clk_mgr);
+ dc->clk_mgr = NULL;
+ }
+
dc_destroy_resource_pool(dc);
if (dc->ctx->gpio_service)
@@ -640,6 +539,11 @@ static void destruct(struct dc *dc)
dc->dcn_ip = NULL;
#endif
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ kfree(dc->vm_helper);
+ dc->vm_helper = NULL;
+
+#endif
}
static bool construct(struct dc *dc,
@@ -656,6 +560,11 @@ static bool construct(struct dc *dc,
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
dc->config = init_params->flags;
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ // Allocate memory for the vm_helper
+ dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+
+#endif
memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
@@ -689,6 +598,9 @@ static bool construct(struct dc *dc,
}
dc->dcn_ip = dcn_ip;
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ dc->soc_bounding_box = init_params->soc_bounding_box;
+#endif
#endif
dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
@@ -756,6 +668,10 @@ static bool construct(struct dc *dc,
if (!dc->res_pool)
goto fail;
+ dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ if (!dc->clk_mgr)
+ goto fail;
+
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
@@ -781,6 +697,21 @@ fail:
return false;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+static bool disable_all_writeback_pipes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < stream->num_wb_info; i++)
+ stream->writeback_info[i].wb_enabled = false;
+
+ return true;
+}
+#endif
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -805,6 +736,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+#endif
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
}
@@ -1136,10 +1070,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* Program all planes within new context*/
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
- struct dc_stream_status *status;
-
- if (context->streams[i]->apply_seamless_boot_optimization)
- context->streams[i]->apply_seamless_boot_optimization = false;
if (!context->streams[i]->mode_changed)
continue;
@@ -1164,9 +1094,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
}
- status = dc_stream_get_status_from_state(context, context->streams[i]);
- context->streams[i]->out.otg_offset = status->primary_otg_inst;
-
CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
context->streams[i]->timing.h_addressable,
context->streams[i]->timing.v_addressable,
@@ -1263,14 +1190,12 @@ struct dc_state *dc_create_state(struct dc *dc)
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
- struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
+ struct dc_state *new_ctx = kmemdup(src_ctx,
+ sizeof(struct dc_state), GFP_KERNEL);
if (!new_ctx)
return NULL;
- memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
-
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1331,71 +1256,94 @@ static bool is_surface_in_context(
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
+ enum surface_update_type update_type = UPDATE_TYPE_FAST;
if (!u->plane_info)
return UPDATE_TYPE_FAST;
- if (u->plane_info->color_space != u->surface->color_space)
+ if (u->plane_info->color_space != u->surface->color_space) {
update_flags->bits.color_space_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
- if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
+ if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
update_flags->bits.horizontal_mirror_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
- if (u->plane_info->rotation != u->surface->rotation)
+ if (u->plane_info->rotation != u->surface->rotation) {
update_flags->bits.rotation_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
- if (u->plane_info->format != u->surface->format)
+ if (u->plane_info->format != u->surface->format) {
update_flags->bits.pixel_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
- if (u->plane_info->stereo_format != u->surface->stereo_format)
+ if (u->plane_info->stereo_format != u->surface->stereo_format) {
update_flags->bits.stereo_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
- if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
+ if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
update_flags->bits.per_pixel_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
- if (u->plane_info->global_alpha_value != u->surface->global_alpha_value)
+ if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
update_flags->bits.global_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
+ update_flags->bits.sdr_white_level = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
if (u->plane_info->dcc.enable != u->surface->dcc.enable
|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
- || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
+ || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) {
update_flags->bits.dcc_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
- resource_pixel_format_to_bpp(u->surface->format))
+ resource_pixel_format_to_bpp(u->surface->format)) {
/* different bytes per element will require full bandwidth
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch
|| u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch
- || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch)
+ || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
sizeof(union dc_tiling_info)) != 0) {
update_flags->bits.swizzle_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+
/* todo: below are HW dependent, we should add a hook to
* DCE/N resource and validated there.
*/
- if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
/* swizzled mode requires RQ to be setup properly,
* thus need to run DML to calculate RQ settings
*/
update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
}
- if (update_flags->bits.rotation_change
- || update_flags->bits.stereo_format_change
- || update_flags->bits.pixel_format_change
- || update_flags->bits.bpp_change
- || update_flags->bits.bandwidth_change
- || update_flags->bits.output_tf_change)
- return UPDATE_TYPE_FULL;
-
- return update_flags->raw ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST;
+ /* This should be UPDATE_TYPE_FAST if nothing has changed. */
+ return update_type;
}
static enum surface_update_type get_scaling_info_update_type(
@@ -1459,6 +1407,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->raw = 0; // Reset all flags
+ if (u->flip_addr)
+ update_flags->bits.addr_update = 1;
+
if (!is_surface_in_context(context, u->surface)) {
update_flags->bits.new_plane = 1;
return UPDATE_TYPE_FULL;
@@ -1475,6 +1426,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_scaling_info_update_type(u);
elevate_update_type(&overall_type, type);
+ if (u->flip_addr)
+ update_flags->bits.addr_update = 1;
+
if (u->in_transfer_func)
update_flags->bits.in_transfer_func_change = 1;
@@ -1542,6 +1496,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->dpms_off)
return UPDATE_TYPE_FULL;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (stream_update->wb_update)
+ return UPDATE_TYPE_FULL;
+#endif
}
for (i = 0 ; i < surface_count; i++) {
@@ -1686,6 +1645,26 @@ static void copy_surface_update_to_plane(
sizeof(struct dc_transfer_func_distributed_points));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (srf_update->func_shaper &&
+ (surface->in_shaper_func !=
+ srf_update->func_shaper))
+ memcpy(surface->in_shaper_func, srf_update->func_shaper,
+ sizeof(*surface->in_shaper_func));
+
+ if (srf_update->lut3d_func &&
+ (surface->lut3d_func !=
+ srf_update->lut3d_func))
+ memcpy(surface->lut3d_func, srf_update->lut3d_func,
+ sizeof(*surface->lut3d_func));
+
+ if (srf_update->blend_tf &&
+ (surface->blend_tf !=
+ srf_update->blend_tf))
+ memcpy(surface->blend_tf, srf_update->blend_tf,
+ sizeof(*surface->blend_tf));
+
+#endif
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
*srf_update->input_csc_color_matrix;
@@ -1695,6 +1674,101 @@ static void copy_surface_update_to_plane(
*srf_update->coeff_reduction_factor;
}
+static void copy_stream_update_to_stream(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ const struct dc_stream_update *update)
+{
+ if (update == NULL || stream == NULL)
+ return;
+
+ if (update->src.height && update->src.width)
+ stream->src = update->src;
+
+ if (update->dst.height && update->dst.width)
+ stream->dst = update->dst;
+
+ if (update->out_transfer_func &&
+ stream->out_transfer_func != update->out_transfer_func) {
+ stream->out_transfer_func->sdr_ref_white_level =
+ update->out_transfer_func->sdr_ref_white_level;
+ stream->out_transfer_func->tf = update->out_transfer_func->tf;
+ stream->out_transfer_func->type =
+ update->out_transfer_func->type;
+ memcpy(&stream->out_transfer_func->tf_pts,
+ &update->out_transfer_func->tf_pts,
+ sizeof(struct dc_transfer_func_distributed_points));
+ }
+
+ if (update->hdr_static_metadata)
+ stream->hdr_static_metadata = *update->hdr_static_metadata;
+
+ if (update->abm_level)
+ stream->abm_level = *update->abm_level;
+
+ if (update->periodic_interrupt0)
+ stream->periodic_interrupt0 = *update->periodic_interrupt0;
+
+ if (update->periodic_interrupt1)
+ stream->periodic_interrupt1 = *update->periodic_interrupt1;
+
+ if (update->gamut_remap)
+ stream->gamut_remap_matrix = *update->gamut_remap;
+
+ /* Note: this being updated after mode set is currently not a use case
+ * however if it arises OCSC would need to be reprogrammed at the
+ * minimum
+ */
+ if (update->output_color_space)
+ stream->output_color_space = *update->output_color_space;
+
+ if (update->output_csc_transform)
+ stream->csc_color_matrix = *update->output_csc_transform;
+
+ if (update->vrr_infopacket)
+ stream->vrr_infopacket = *update->vrr_infopacket;
+
+ if (update->dpms_off)
+ stream->dpms_off = *update->dpms_off;
+
+ if (update->vsc_infopacket)
+ stream->vsc_infopacket = *update->vsc_infopacket;
+
+ if (update->vsp_infopacket)
+ stream->vsp_infopacket = *update->vsp_infopacket;
+
+ if (update->dither_option)
+ stream->dither_option = *update->dither_option;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ /* update current stream with writeback info */
+ if (update->wb_update) {
+ int i;
+
+ stream->num_wb_info = update->wb_update->num_wb_info;
+ ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
+ for (i = 0; i < stream->num_wb_info; i++)
+ stream->writeback_info[i] =
+ update->wb_update->writeback_info[i];
+ }
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
+ if (update->dsc_config) {
+ struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
+ uint32_t old_dsc_enabled = stream->timing.flags.DSC;
+ uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
+ update->dsc_config->num_slices_v != 0);
+
+ stream->timing.dsc_cfg = *update->dsc_config;
+ stream->timing.flags.DSC = enable_dsc;
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
+ true)) {
+ stream->timing.dsc_cfg = old_dsc_cfg;
+ stream->timing.flags.DSC = old_dsc_enabled;
+ }
+ }
+#endif
+}
+
static void commit_planes_do_stream_update(struct dc *dc,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
@@ -1711,13 +1785,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
pipe_ctx->stream &&
pipe_ctx->stream == stream) {
- /* Fast update*/
- // VRR program can be done as part of FAST UPDATE
- if (stream_update->adjust)
- dc->hwss.set_drr(&pipe_ctx, 1,
- stream_update->adjust->v_total_min,
- stream_update->adjust->v_total_max);
-
if (stream_update->periodic_interrupt0 &&
dc->hwss.setup_periodic_interrupt)
dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
@@ -1741,13 +1808,29 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc_stream_program_csc_matrix(dc, stream);
if (stream_update->dither_option) {
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+#endif
resource_build_bit_depth_reduction_params(pipe_ctx->stream,
&pipe_ctx->stream->bit_depth_params);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (odm_pipe)
+ odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+#endif
}
+#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
+ if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
+ dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
+ dp_update_dsc_config(pipe_ctx);
+ dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
+ }
+#endif
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -1792,10 +1875,15 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->optimize_seamless_boot && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
- * bandwidth.
+ * bandwidth. Important to note that it is expected UEFI will
+ * only light up a single display on POST, therefore we only expect
+ * one stream with seamless boot flag set.
*/
- dc->optimize_seamless_boot = false;
- dc->optimized_required = true;
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+ dc->optimize_seamless_boot = false;
+ dc->optimized_required = true;
+ }
}
if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
@@ -1816,6 +1904,30 @@ static void commit_planes_for_stream(struct dc *dc,
return;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ /*set logical flag for lock/unlock use*/
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+ if (!pipe_ctx->plane_state)
+ continue;
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+ plane_state->triplebuffer_flips = false;
+ if (update_type == UPDATE_TYPE_FAST &&
+ dc->hwss.program_triplebuffer != NULL &&
+ !plane_state->flip_immediate &&
+ !dc->debug.disable_tri_buf) {
+ /*triple buffer for VUpdate only*/
+ plane_state->triplebuffer_flips = true;
+ }
+ }
+ }
+ }
+#endif
+
// Update Type FULL, Surface updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -1834,6 +1946,16 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+
+ if (dc->hwss.program_triplebuffer != NULL &&
+ !dc->debug.disable_tri_buf) {
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+#endif
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -1850,6 +1972,26 @@ static void commit_planes_for_stream(struct dc *dc,
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.set_flip_control_gsl)
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ // GSL has to be used for flip immediate
+ dc->hwss.set_flip_control_gsl(pipe_ctx,
+ plane_state->flip_immediate);
+ }
+ }
+#endif
/* Perform requested Updates */
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -1862,7 +2004,15 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->plane_state != plane_state)
continue;
-
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ /*program triple buffer after lock based on flip type*/
+ if (dc->hwss.program_triplebuffer != NULL &&
+ !dc->debug.disable_tri_buf) {
+ /*only enable triplebuffer for fast_update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, plane_state->triplebuffer_flips);
+ }
+#endif
if (srf_updates[i].flip_addr)
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
@@ -1870,6 +2020,20 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
+
+ // Fire manual trigger only when bottom plane is flipped
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->bottom_pipe ||
+ !pipe_ctx->stream ||
+ pipe_ctx->stream != stream ||
+ !pipe_ctx->plane_state->update_flags.bits.addr_update)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
+ }
}
void dc_commit_updates_for_stream(struct dc *dc,
@@ -1933,6 +2097,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
}
}
+ copy_stream_update_to_stream(dc, context, stream, stream_update);
+
commit_planes_for_stream(
dc,
srf_updates,
@@ -2006,6 +2172,12 @@ void dc_set_power_state(
enum dc_acpi_cm_power_state power_state)
{
struct kref refcount;
+ struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
+ GFP_KERNEL);
+
+ ASSERT(dml);
+ if (!dml)
+ return;
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
@@ -2022,15 +2194,20 @@ void dc_set_power_state(
/* Preserve refcount */
refcount = dc->current_state->refcount;
+ /* Preserve display mode lib */
+ memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
+
dc_resource_state_destruct(dc->current_state);
memset(dc->current_state, 0,
sizeof(*dc->current_state));
dc->current_state->refcount = refcount;
+ dc->current_state->bw_ctx.dml = *dml;
break;
}
+ kfree(dml);
}
void dc_resume(struct dc *dc)
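
Note on the dc.c changes above: get_plane_info_update_type() now both sets the relevant update flag and elevates a running update_type for every property that changed, instead of deriving the result from the accumulated flags at the end. A minimal sketch of what an elevate_update_type() helper of this kind does, assuming the usual UPDATE_TYPE_FAST < UPDATE_TYPE_MED < UPDATE_TYPE_FULL ordering (the actual helper is defined elsewhere in dc.c and is not part of this hunk):

    /* Sketch only: keep the strongest update type seen so far.
     * Assumes enum surface_update_type orders FAST < MED < FULL.
     */
    static void elevate_update_type(enum surface_update_type *original,
                                    enum surface_update_type new)
    {
        if (new > *original)
            *original = new;
    }

With that helper, each comparison in the diff contributes at most one elevation, and the function can simply return update_type, which remains UPDATE_TYPE_FAST when nothing changed.
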
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 83d121510ef5..c026b393f3c5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dm_services.h"
#include "core_types.h"
#include "timing_generator.h"
@@ -45,8 +47,10 @@ enum dc_color_space_type {
COLOR_SPACE_RGB_LIMITED_TYPE,
COLOR_SPACE_YCBCR601_TYPE,
COLOR_SPACE_YCBCR709_TYPE,
+ COLOR_SPACE_YCBCR2020_TYPE,
COLOR_SPACE_YCBCR601_LIMITED_TYPE,
- COLOR_SPACE_YCBCR709_LIMITED_TYPE
+ COLOR_SPACE_YCBCR709_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR709_BLACK_TYPE,
};
static const struct tg_color black_color_format[] = {
@@ -80,7 +84,6 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ COLOR_SPACE_YCBCR709_TYPE,
{ 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
-
/* TODO: correct values below */
{ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
{ 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
@@ -88,6 +91,12 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ COLOR_SPACE_YCBCR709_LIMITED_TYPE,
{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+ { COLOR_SPACE_YCBCR2020_TYPE,
+ { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
+ 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
+ { COLOR_SPACE_YCBCR709_BLACK_TYPE,
+ { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
};
static bool is_rgb_type(
@@ -149,6 +158,16 @@ static bool is_ycbcr709_type(
return ret;
}
+static bool is_ycbcr2020_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_2020_YCBCR)
+ ret = true;
+ return ret;
+}
+
static bool is_ycbcr709_limited_type(
enum dc_color_space color_space)
{
@@ -174,7 +193,12 @@ enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
type = COLOR_SPACE_YCBCR601_LIMITED_TYPE;
else if (is_ycbcr709_limited_type(color_space))
type = COLOR_SPACE_YCBCR709_LIMITED_TYPE;
-
+ else if (is_ycbcr2020_type(color_space))
+ type = COLOR_SPACE_YCBCR2020_TYPE;
+ else if (color_space == COLOR_SPACE_YCBCR709)
+ type = COLOR_SPACE_YCBCR709_BLACK_TYPE;
+ else if (color_space == COLOR_SPACE_YCBCR709_BLACK)
+ type = COLOR_SPACE_YCBCR709_BLACK_TYPE;
return type;
}
@@ -206,6 +230,7 @@ void color_space_to_black_color(
switch (colorspace) {
case COLOR_SPACE_YCBCR601:
case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_BLACK:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
case COLOR_SPACE_2020_YCBCR:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index b37ecc3ede61..8dbf759eba45 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "atom.h"
#include "dm_helpers.h"
@@ -42,6 +44,11 @@
#include "fixed31_32.h"
#include "dpcd_defs.h"
#include "dmcu.h"
+#include "hw/clk_mgr.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+#include "resource.h"
+#endif
+#include "hw/clk_mgr.h"
#define DC_LOGGER_INIT(logger)
@@ -216,8 +223,11 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
return true;
}
- if (link->connector_signal == SIGNAL_TYPE_EDP)
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /*in case it is not on*/
+ link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
/* todo: may need to lock gpio access */
hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
@@ -519,11 +529,31 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
uint8_t link_rate_set;
+ uint32_t read_dpcd_retry_cnt = 10;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ int i;
// Read DPCD 00101h to find out the number of lanes currently set
- core_link_read_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, sizeof(lane_count_set));
- link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+ /* First DPCD read after VDD ON can fail if the particular board
+ * does not have HPD pin wired correctly. So if DPCD read fails,
+ * which should never happen, retry a few times. Target worst
+ * case scenario of 80 ms.
+ */
+ if (status == DC_OK) {
+ link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+ break;
+ }
+
+ msleep(8);
+ }
+
+ ASSERT(status == DC_OK);
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
@@ -677,6 +707,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (dc_is_virtual_signal(link->connector_signal))
return false;
+ if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
+ link->connector_signal == SIGNAL_TYPE_EDP) &&
+ link->local_sink)
+ return true;
+
if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
@@ -687,14 +722,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* up to date, especially if link was powered on by GOP.
*/
read_edp_current_link_settings_on_detect(link);
- if (link->local_sink)
- return true;
}
- if (link->connector_signal == SIGNAL_TYPE_LVDS &&
- link->local_sink)
- return true;
-
prev_sink = link->local_sink;
if (prev_sink != NULL) {
dc_sink_retain(prev_sink);
@@ -704,6 +733,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (new_connection_type != dc_connection_none) {
link->type = new_connection_type;
+ link->link_state_valid = false;
/* From Disconnected-to-Connected. */
switch (link->connector_signal) {
@@ -906,10 +936,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
/* Connectivity log: detection */
- for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+ for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
- &sink->dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
- EDID_BLOCK_SIZE,
+ &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+ DC_EDID_BLOCK_SIZE,
"%s: [Block %d] ", sink->edid_caps.display_name, i);
}
@@ -960,6 +990,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
link->type = dc_connection_none;
sink_caps.signal = SIGNAL_TYPE_NONE;
+ /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
+ * is not cleared. If we emulate a DP signal on this connection, it thinks
+ * the dongle is still there and limits the number of modes we can emulate.
+ * Clear dongle_max_pix_clk on disconnect to fix this
+ */
+ link->dongle_max_pix_clk = 0;
}
LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
@@ -1156,7 +1192,7 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
- dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+ dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
__func__, init_params->connector_index,
link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
@@ -1474,6 +1510,10 @@ static enum dc_status enable_link_dp(
if (link_settings.link_rate == LINK_RATE_LOW)
skip_video_pattern = false;
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ dp_set_fec_ready(link, true);
+#endif
+
if (perform_link_training_with_retries(
link,
&link_settings,
@@ -1485,6 +1525,9 @@ static enum dc_status enable_link_dp(
else
status = DC_FAIL_DP_LINK_TRAINING;
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ dp_set_fec_enable(link, true);
+#endif
return status;
}
@@ -2107,6 +2150,14 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
dp_disable_link_phy(link, signal);
else
dp_disable_link_phy_mst(link, signal);
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+
+ if (dc_is_dp_sst_signal(signal) ||
+ link->mst_stream_alloc_table.stream_count == 0) {
+ dp_set_fec_enable(link, false);
+ dp_set_fec_ready(link, false);
+ }
+#endif
} else
link->link_enc->funcs->disable_output(link->link_enc, signal);
@@ -2337,7 +2388,8 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
struct fixed31_32 mbytes_per_sec;
- uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
+ uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
+ &stream->link->cur_link_settings);
link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
@@ -2631,6 +2683,8 @@ void core_link_enable_stream(
stream->phy_pix_clk,
pipe_ctx->stream_res.audio != NULL);
+ pipe_ctx->stream->link->link_state_valid = true;
+
if (dc_is_dvi_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
@@ -2700,33 +2754,76 @@ void core_link_enable_stream(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (pipe_ctx->stream->timing.flags.DSC &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))) {
+ dp_set_dsc_enable(pipe_ctx, true);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(
+ pipe_ctx->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ }
+#endif
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
}
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+ }
+#endif
}
void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- dal_ddc_service_write_scdc_data(
- stream->link->ddc, 0,
- stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ struct ext_hdmi_settings settings = {0};
+ enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
+ unsigned short masked_chip_caps = link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ //Need to inform that sink is going to use legacy HDMI mode.
+ dal_ddc_service_write_scdc_data(
+ link->ddc,
+ 165000,//vbios only handles 165Mhz.
+ false);
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
+ write_i2c_retimer_setting(pipe_ctx,
+ false, false, &settings);
+ else
+ write_i2c_default_retimer_setting(pipe_ctx,
+ false, false);
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, false);
+ }
+ }
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (pipe_ctx->stream->timing.flags.DSC &&
+ dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ dp_set_dsc_enable(pipe_ctx, false);
+ }
+#endif
}
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -2794,6 +2891,14 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
uint32_t bits_per_channel = 0;
uint32_t kbps;
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (timing->flags.DSC) {
+ kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
+ kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ return kbps;
+ }
+#endif
+
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
bits_per_channel = 6;
@@ -2834,3 +2939,153 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
return kbps;
}
+
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe;
+ struct dc_stream_state *link_stream;
+ struct dc_link_settings store_settings = *link_setting;
+
+ link->preferred_link_setting = store_settings;
+
+ /* Retrain with preferred link settings only relevant for
+ * DP signal type
+ */
+ if (!dc_is_dp_signal(link->connector_signal))
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link) {
+ if (pipe->stream->link == link)
+ break;
+ }
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return;
+
+ link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ /* Cannot retrain link if backend is off */
+ if (link_stream->dpms_off)
+ return;
+
+ if (link_stream)
+ decide_link_settings(link_stream, &store_settings);
+
+ if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
+ (store_settings.link_rate != LINK_RATE_UNKNOWN))
+ dp_retrain_link_dp_test(link, &store_settings, false);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting)
+{
+ uint32_t link_bw_kbps =
+ link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
+
+ link_bw_kbps *= 8; /* 8 bits per byte*/
+ link_bw_kbps *= link_setting->lane_count;
+
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ /* Account for FEC overhead.
+ * We have to do it based on caps,
+ * and not based on FEC being set ready,
+ * because FEC is set ready too late in
+ * the process to correctly be picked up
+ * by mode enumeration.
+ *
+ * There's enough zeros at the end of 'kbps'
+ * that make the below operation 100% precise
+ * for our purposes.
+ * 'long long' makes it work even for HDMI 2.1
+ * max bandwidth (and much, much bigger bandwidths
+ * than that, actually).
+ *
+ * NOTE: Reducing link BW by 3% may not be precise
+ * because it may be a stream BW that increases by 3%, and so
+ * 1/1.03 = 0.970873 factor should have been used instead,
+ * but the difference is minimal and is in a safe direction,
+ * which all works well around potential ambiguity of DP 1.4a spec.
+ */
+ link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
+ link_bw_kbps, 32);
+ }
+#endif
+
+ return link_bw_kbps;
+
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
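
For reference, a standalone re-derivation of the dc_link_bandwidth_kbps() arithmetic above, including the ~3% FEC de-rating. The 27 MHz reference frequency (LINK_RATE_REF_FREQ_IN_KHZ) and the 0x14 HBR2 link-rate code are assumptions based on common DP definitions, not values taken from this patch:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch, not driver code: mirrors the formula in dc_link_bandwidth_kbps(). */
    static uint64_t link_bw_kbps_sketch(uint32_t link_rate, uint32_t lane_count,
                                        int fec_capable)
    {
        /* link_rate is the DPCD link-rate code; multiplying by the assumed
         * 27000 kHz reference gives kBytes/s per lane (8b/10b payload).
         */
        uint64_t kbps = (uint64_t)link_rate * 27000;

        kbps *= 8;          /* 8 bits per byte */
        kbps *= lane_count;
        if (fec_capable)
            kbps = kbps * 970 / 1000; /* same ~3% FEC de-rating as above */
        return kbps;
    }

    int main(void)
    {
        /* 4-lane HBR2 (code 0x14 = 20): 20 * 27000 * 8 * 4 = 17,280,000 kbps */
        assert(link_bw_kbps_sketch(0x14, 4, 0) == 17280000);
        /* FEC-capable sink: 17,280,000 * 970 / 1000 = 16,761,600 kbps */
        assert(link_bw_kbps_sketch(0x14, 4, 1) == 16761600);
        return 0;
    }
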
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index f02092a0dc76..e6da8506128b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dm_helpers.h"
#include "gpio_service_interface.h"
@@ -91,6 +93,8 @@ union hdmi_scdc_status_flags_data {
uint8_t CH2_LOCKED:1;
uint8_t RESERVED:4;
uint8_t RESERVED2:8;
+ uint8_t RESERVED3:8;
+
} fields;
};
@@ -107,14 +111,10 @@ union hdmi_scdc_ced_data {
uint8_t CH2_7HIGH:7;
uint8_t CH2_VALID:1;
uint8_t CHECKSUM:8;
- } fields;
-};
-
-union hdmi_scdc_test_config_Data {
- uint8_t byte;
- struct {
- uint8_t TEST_READ_REQUEST_DELAY:7;
- uint8_t TEST_READ_REQUEST: 1;
+ uint8_t RESERVED:8;
+ uint8_t RESERVED2:8;
+ uint8_t RESERVED3:8;
+ uint8_t RESERVED4:4;
} fields;
};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 1ee544a32ebb..056be4c34a98 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4,6 +4,12 @@
#include "dc_link_dp.h"
#include "dm_helpers.h"
#include "opp.h"
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#include "dsc.h"
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+#include "resource.h"
+#endif
#include "inc/core_types.h"
#include "link_hwss.h"
@@ -89,6 +95,29 @@ static void dpcd_set_training_pattern(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
+static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
+{
+ enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
+ struct encoder_feature_support *features = &link->link_enc->features;
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+ if (features->flags.bits.IS_TPS3_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_3;
+
+ if (features->flags.bits.IS_TPS4_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_4)
+ return HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_3)
+ return HW_DP_TRAINING_PATTERN_3;
+
+ return HW_DP_TRAINING_PATTERN_2;
+}
+
static void dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
@@ -97,6 +126,7 @@ static void dpcd_set_link_settings(
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
+ enum hw_dp_training_pattern hw_tr_pattern;
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -106,8 +136,13 @@ static void dpcd_set_link_settings(
lane_count_set.bits.ENHANCED_FRAMING = 1;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+ hw_tr_pattern = get_supported_tp(link);
+ if (hw_tr_pattern != HW_DP_TRAINING_PATTERN_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
@@ -698,29 +733,6 @@ static bool perform_post_lt_adj_req_sequence(
}
-static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
-{
- enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
- struct encoder_feature_support *features = &link->link_enc->features;
- struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
-
- if (features->flags.bits.IS_TPS3_CAPABLE)
- highest_tp = HW_DP_TRAINING_PATTERN_3;
-
- if (features->flags.bits.IS_TPS4_CAPABLE)
- highest_tp = HW_DP_TRAINING_PATTERN_4;
-
- if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
- highest_tp >= HW_DP_TRAINING_PATTERN_4)
- return HW_DP_TRAINING_PATTERN_4;
-
- if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
- highest_tp >= HW_DP_TRAINING_PATTERN_3)
- return HW_DP_TRAINING_PATTERN_3;
-
- return HW_DP_TRAINING_PATTERN_2;
-}
-
static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
@@ -1624,8 +1636,7 @@ static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settin
uint32_t link_bw;
if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
- link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- link->dc->config.optimize_edp_link_rate == false) {
+ link->dpcd_caps.edp_supported_link_rates_count == 0) {
*link_setting = link->verified_link_cap;
return true;
}
@@ -2361,6 +2372,7 @@ static bool retrieve_link_cap(struct dc_link *link)
/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
*/
uint8_t dpcd_dprx_data = '\0';
+ uint8_t dpcd_power_state = '\0';
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
@@ -2377,6 +2389,17 @@ static bool retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ /* Delay 1 ms if AUX CH is in power down state. Based on spec
+ * section 2.3.1.2, if AUX CH may be powered down due to
+ * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
+ * signal and may need up to 1 ms before being able to reply.
+ */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
+ udelay(1000);
+
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
@@ -2530,6 +2553,30 @@ static bool retrieve_link_cap(struct dc_link *link)
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ memset(&link->dpcd_caps.dsc_caps, '\0',
+ sizeof(link->dpcd_caps.dsc_caps));
+ memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
+ /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
+ status = core_link_read_dpcd(
+ link,
+ DP_FEC_CAPABILITY,
+ &link->dpcd_caps.fec_cap.raw,
+ sizeof(link->dpcd_caps.fec_cap.raw));
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_SUPPORT,
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
+ link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
+ }
+#endif
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -2597,7 +2644,8 @@ void detect_edp_sink_caps(struct dc_link *link)
memset(supported_link_rates, 0, sizeof(supported_link_rates));
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- link->dc->config.optimize_edp_link_rate) {
+ (link->dc->config.optimize_edp_link_rate ||
+ link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
@@ -2612,6 +2660,9 @@ void detect_edp_sink_caps(struct dc_link *link)
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
link->dpcd_caps.edp_supported_link_rates_count++;
+
+ if (link->reported_link_cap.link_rate < link_rate)
+ link->reported_link_cap.link_rate = link_rate;
}
}
}
@@ -2653,6 +2704,14 @@ static void set_crtc_test_pattern(struct dc_link *link,
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ int width = pipe_ctx->stream->timing.h_addressable +
+ pipe_ctx->stream->timing.h_border_left +
+ pipe_ctx->stream->timing.h_border_right;
+ int height = pipe_ctx->stream->timing.v_addressable +
+ pipe_ctx->stream->timing.v_border_bottom +
+ pipe_ctx->stream->timing.v_border_top;
+#endif
memset(&params, 0, sizeof(params));
@@ -2696,6 +2755,30 @@ static void set_crtc_test_pattern(struct dc_link *link,
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ else if (opp->funcs->opp_set_disp_pattern_generator) {
+ struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+
+ if (bot_odm_pipe) {
+ struct output_pixel_processor *bot_opp = bot_odm_pipe->stream_res.opp;
+
+ bot_opp->funcs->opp_program_bit_depth_reduction(bot_opp, &params);
+ width /= 2;
+ bot_opp->funcs->opp_set_disp_pattern_generator(bot_opp,
+ controller_test_pattern,
+ color_depth,
+ NULL,
+ width,
+ height);
+ }
+ opp->funcs->opp_set_disp_pattern_generator(opp,
+ controller_test_pattern,
+ color_depth,
+ NULL,
+ width,
+ height);
+ }
+#endif
}
break;
case DP_TEST_PATTERN_VIDEO_MODE:
@@ -2708,6 +2791,30 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
color_depth);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ else if (opp->funcs->opp_set_disp_pattern_generator) {
+ struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+
+ if (bot_odm_pipe) {
+ struct output_pixel_processor *bot_opp = bot_odm_pipe->stream_res.opp;
+
+ bot_opp->funcs->opp_program_bit_depth_reduction(bot_opp, &params);
+ width /= 2;
+ bot_opp->funcs->opp_set_disp_pattern_generator(bot_opp,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth,
+ NULL,
+ width,
+ height);
+ }
+ opp->funcs->opp_set_disp_pattern_generator(opp,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth,
+ NULL,
+ width,
+ height);
+ }
+#endif
}
break;
@@ -2882,3 +2989,67 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
}
+
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+void dp_set_fec_ready(struct dc_link *link, bool ready)
+{
+ /* FEC has to be "set ready" before the link training.
+ * The policy is to always train with FEC
+ * if the sink supports it and leave it enabled on link.
+ * If FEC is not supported, disable it.
+ */
+ struct link_encoder *link_enc = link->link_enc;
+ uint8_t fec_config = 0;
+
+ if (link->dc->debug.disable_fec ||
+ IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ return;
+
+ if (link_enc->funcs->fec_set_ready &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (link->fec_state == dc_link_fec_not_ready && ready) {
+ fec_config = 1;
+ if (core_link_write_dpcd(link,
+ DP_FEC_CONFIGURATION,
+ &fec_config,
+ sizeof(fec_config)) == DC_OK) {
+ link_enc->funcs->fec_set_ready(link_enc, true);
+ link->fec_state = dc_link_fec_ready;
+ } else {
+ dm_error("dpcd write failed to set fec_ready");
+ }
+ } else if (link->fec_state == dc_link_fec_ready && !ready) {
+ fec_config = 0;
+ core_link_write_dpcd(link,
+ DP_FEC_CONFIGURATION,
+ &fec_config,
+ sizeof(fec_config));
+ link->link_enc->funcs->fec_set_ready(
+ link->link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
+ }
+ }
+}
+
+void dp_set_fec_enable(struct dc_link *link, bool enable)
+{
+ struct link_encoder *link_enc = link->link_enc;
+
+ if (link->dc->debug.disable_fec ||
+ IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ return;
+
+ if (link_enc->funcs->fec_set_enable &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (link->fec_state == dc_link_fec_ready && enable) {
+ msleep(1);
+ link_enc->funcs->fec_set_enable(link_enc, true);
+ link->fec_state = dc_link_fec_enabled;
+ } else if (link->fec_state == dc_link_fec_enabled && !enable) {
+ link_enc->funcs->fec_set_enable(link_enc, false);
+ link->fec_state = dc_link_fec_ready;
+ }
+ }
+}
+#endif
+
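
The two FEC helpers added above are meant to bracket DP link training: the dc_link.c hunks earlier call dp_set_fec_ready() before perform_link_training_with_retries() and dp_set_fec_enable() after it, and disable_link() tears both down. A summary of the link->fec_state transitions implemented by the code above (documentation only, not new driver code):

    /*
     * dc_link_fec_not_ready --dp_set_fec_ready(link, true)-->  dc_link_fec_ready
     *     writes DP_FEC_CONFIGURATION = 1 and arms the link encoder before training
     * dc_link_fec_ready     --dp_set_fec_enable(link, true)--> dc_link_fec_enabled
     *     done after training; preceded by a 1 ms sleep
     * dc_link_fec_enabled   --dp_set_fec_enable(link, false)-> dc_link_fec_ready
     * dc_link_fec_ready     --dp_set_fec_ready(link, false)--> dc_link_fec_not_ready
     *     clears DP_FEC_CONFIGURATION and the encoder ready state
     */

Both helpers bail out early when dc->debug.disable_fec is set, on FPGA (Maximus) environments, or when the sink does not report FEC_CAPABLE.
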
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index b0dea759cd86..2d019e1f6135 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -12,6 +12,12 @@
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "dpcd_defs.h"
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#include "dsc.h"
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+#include "resource.h"
+#endif
enum dc_status core_link_read_dpcd(
struct dc_link *link,
@@ -360,3 +366,141 @@ void dp_retrain_link_dp_test(struct dc_link *link,
}
}
}
+
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#define DC_LOGGER \
+ dsc->ctx->logger
+static void dsc_optc_config_log(struct display_stream_compressor *dsc,
+ struct dsc_optc_config *config)
+{
+ DC_LOG_DSC("Setting optc DSC config at DSC inst %d", dsc->inst);
+ DC_LOG_DSC("\n\tbytes_per_pixel %d\n\tis_pixel_format_444 %d\n\tslice_width %d",
+ config->bytes_per_pixel,
+ config->is_pixel_format_444, config->slice_width);
+}
+
+static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ bool result = false;
+
+ if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+ result = true;
+ else
+ result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable);
+ return result;
+}
+
+/* This has to be done after DSC was enabled on RX first, i.e. after dp_enable_dsc_on_rx() had been called
+ */
+static void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+
+ if (enable) {
+ /* TODO proper function */
+ struct dsc_config dsc_cfg;
+ struct dsc_optc_config dsc_optc_cfg;
+ enum optc_dsc_mode optc_dsc_mode;
+ uint8_t dsc_packed_pps[128];
+
+ /* Enable DSC hw block */
+ dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps[0]);
+ if (odm_pipe) {
+ struct display_stream_compressor *bot_dsc = odm_pipe->stream_res.dsc;
+ uint8_t dsc_packed_pps_odm[128];
+
+ dsc_cfg.pic_width /= 2;
+ ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % 2 == 0);
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= 2;
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps_odm[0]);
+ bot_dsc->funcs->dsc_set_config(bot_dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps_odm[0]);
+ bot_dsc->funcs->dsc_enable(bot_dsc, odm_pipe->stream_res.opp->inst);
+ }
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+
+ optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+
+ dsc_optc_config_log(dsc, &dsc_optc_cfg);
+ /* Enable DSC in encoder */
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config)
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
+ optc_dsc_mode,
+ dsc_optc_cfg.bytes_per_pixel,
+ dsc_optc_cfg.slice_width,
+ &dsc_packed_pps[0]);
+
+ /* Enable DSC in OPTC */
+ pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
+ optc_dsc_mode,
+ dsc_optc_cfg.bytes_per_pixel,
+ dsc_optc_cfg.slice_width);
+ } else {
+ /* disable DSC in OPTC */
+ pipe_ctx->stream_res.tg->funcs->set_dsc_config(
+ pipe_ctx->stream_res.tg,
+ OPTC_DSC_DISABLED, 0, 0);
+
+ /* disable DSC in stream encoder */
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+ pipe_ctx->stream_res.stream_enc,
+ OPTC_DSC_DISABLED, 0, 0, NULL);
+ }
+
+ /* disable DSC block */
+ pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
+ if (odm_pipe)
+ odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
+ }
+}
+
+bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ bool result = false;
+
+ if (!pipe_ctx->stream->timing.flags.DSC)
+ goto out;
+ if (!dsc)
+ goto out;
+
+ if (enable) {
+ if (dp_set_dsc_on_rx(pipe_ctx, true)) {
+ dp_set_dsc_on_stream(pipe_ctx, true);
+ result = true;
+ }
+ } else {
+ dp_set_dsc_on_rx(pipe_ctx, false);
+ dp_set_dsc_on_stream(pipe_ctx, false);
+ result = true;
+ }
+out:
+ return result;
+}
+
+bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (!pipe_ctx->stream->timing.flags.DSC)
+ return false;
+ if (!dsc)
+ return false;
+
+ dp_set_dsc_on_stream(pipe_ctx, true);
+ return true;
+}
+
+#endif
+
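In dp_set_dsc_on_stream() above, the DSC picture size is the addressable timing plus borders, and for an ODM-combined stream both the picture width and the horizontal slice count are halved so each of the two DSC instances compresses one half of the frame. A standalone sketch of that arithmetic follows, using a hypothetical 4K timing; the struct and values are illustrative, not the driver's types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative subset of the timing/DSC fields used by the hunk. */
struct demo_timing {
	uint32_t h_addressable, h_border_left, h_border_right;
	uint32_t v_addressable, v_border_top, v_border_bottom;
	uint32_t num_slices_h;
};

int main(void)
{
	/* Hypothetical 3840x2160 timing, no borders, 8 slices per line. */
	struct demo_timing t = { 3840, 0, 0, 2160, 0, 0, 8 };
	int odm_combine = 1;	/* stream split across two pipes */

	uint32_t pic_width  = t.h_addressable + t.h_border_left + t.h_border_right;
	uint32_t pic_height = t.v_addressable + t.v_border_top + t.v_border_bottom;
	uint32_t slices_h   = t.num_slices_h;

	if (odm_combine) {
		/* Each DSC instance compresses half the line, so the
		 * slice count must split evenly between the two pipes.
		 */
		assert(slices_h % 2 == 0);
		pic_width /= 2;
		slices_h  /= 2;
	}

	printf("per-DSC picture: %ux%u, %u slices per line\n",
	       (unsigned)pic_width, (unsigned)pic_height, (unsigned)slices_h);
	return 0;
}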
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index eac7186e4f08..173fcfb5abe6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -22,6 +22,9 @@
* Authors: AMD
*
*/
+
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "resource.h"
@@ -46,6 +49,9 @@
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn10/dcn10_resource.h"
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+#include "dcn20/dcn20_resource.h"
+#endif
#include "dce120/dce120_resource.h"
#define DC_LOGGER_INIT(logger)
@@ -93,10 +99,14 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_1_01;
+ break;
#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ case FAMILY_NV:
+ dc_version = DCN_VERSION_2_0;
break;
#endif
default:
@@ -147,14 +157,18 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
-#endif
res_pool = dcn10_create_resource_pool(init_data, dc);
break;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ case DCN_VERSION_2_0:
+ res_pool = dcn20_create_resource_pool(init_data, dc);
+ break;
+#endif
+
default:
break;
}
@@ -1184,24 +1198,27 @@ static int acquire_first_split_pipe(
int i;
for (i = 0; i < pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
- if (pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state) {
- pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
- if (pipe_ctx->bottom_pipe)
- pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
-
- memset(pipe_ctx, 0, sizeof(*pipe_ctx));
- pipe_ctx->stream_res.tg = pool->timing_generators[i];
- pipe_ctx->plane_res.hubp = pool->hubps[i];
- pipe_ctx->plane_res.ipp = pool->ipps[i];
- pipe_ctx->plane_res.dpp = pool->dpps[i];
- pipe_ctx->stream_res.opp = pool->opps[i];
- pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
- pipe_ctx->pipe_idx = i;
-
- pipe_ctx->stream = stream;
+ struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i];
+
+ if (split_pipe->top_pipe && !dc_res_is_odm_head_pipe(split_pipe) &&
+ split_pipe->top_pipe->plane_state == split_pipe->plane_state) {
+ split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe;
+ if (split_pipe->bottom_pipe)
+ split_pipe->bottom_pipe->top_pipe = split_pipe->top_pipe;
+
+ if (split_pipe->top_pipe->plane_state)
+ resource_build_scaling_params(split_pipe->top_pipe);
+
+ memset(split_pipe, 0, sizeof(*split_pipe));
+ split_pipe->stream_res.tg = pool->timing_generators[i];
+ split_pipe->plane_res.hubp = pool->hubps[i];
+ split_pipe->plane_res.ipp = pool->ipps[i];
+ split_pipe->plane_res.dpp = pool->dpps[i];
+ split_pipe->stream_res.opp = pool->opps[i];
+ split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
+ split_pipe->pipe_idx = i;
+
+ split_pipe->stream = stream;
return i;
}
}
@@ -1647,46 +1664,6 @@ static int acquire_first_free_pipe(
return -1;
}
-static struct stream_encoder *find_first_free_match_stream_enc_for_link(
- struct resource_context *res_ctx,
- const struct resource_pool *pool,
- struct dc_stream_state *stream)
-{
- int i;
- int j = -1;
- struct dc_link *link = stream->link;
-
- for (i = 0; i < pool->stream_enc_count; i++) {
- if (!res_ctx->is_stream_enc_acquired[i] &&
- pool->stream_enc[i]) {
- /* Store first available for MST second display
- * in daisy chain use case */
- j = i;
- if (pool->stream_enc[i]->id ==
- link->link_enc->preferred_engine)
- return pool->stream_enc[i];
- }
- }
-
- /*
- * below can happen in cases when stream encoder is acquired:
- * 1) for second MST display in chain, so preferred engine already
- * acquired;
- * 2) for another link, which preferred engine already acquired by any
- * MST configuration.
- *
- * If signal is of DP type and preferred engine not found, return last available
- *
- * TODO - This is just a patch up and a generic solution is
- * required for non DP connectors.
- */
-
- if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
- return pool->stream_enc[j];
-
- return NULL;
-}
-
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1998,7 +1975,7 @@ enum dc_status resource_map_pool_resources(
pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
pipe_ctx->stream_res.stream_enc =
- find_first_free_match_stream_enc_for_link(
+ dc->res_pool->funcs->find_first_free_match_stream_enc_for_link(
&context->res_ctx, pool, stream);
if (!pipe_ctx->stream_res.stream_enc)
@@ -2059,7 +2036,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
+ dst_ctx->clk_mgr = dc->clk_mgr;
}
/**
@@ -2354,7 +2331,18 @@ static void set_avi_info_frame(
break;
}
}
+ /* If VIC >= 128, the Source shall use AVI InfoFrame Version 3 */
hdmi_info.bits.VIC0_VIC7 = vic;
+ if (vic >= 128)
+ hdmi_info.bits.header.version = 3;
+ /* If (C1, C0)=(1, 1) and (EC2, EC1, EC0)=(1, 1, 1),
+ * the Source shall use AVI InfoFrame Version 4
+ */
+ if (hdmi_info.bits.C0_C1 == COLORIMETRY_EXTENDED &&
+ hdmi_info.bits.EC0_EC2 == COLORIMETRYEX_RESERVED) {
+ hdmi_info.bits.header.version = 4;
+ hdmi_info.bits.header.length = 14;
+ }
/* pixel repetition
* PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
@@ -2373,12 +2361,19 @@ static void set_avi_info_frame(
hdmi_info.bits.bar_right = (stream->timing.h_total
- stream->timing.h_border_right + 1);
+ /* Additional Colorimetry Extension
+ * Used in conjunction with C0-C1 and EC0-EC2
+ * 0 = DCI-P3 RGB (D65)
+ * 1 = DCI-P3 RGB (theater)
+ */
+ hdmi_info.bits.ACE0_ACE3 = 0;
+
/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
check_sum = &hdmi_info.packet_raw_data.sb[0];
- *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
+ *check_sum = HDMI_INFOFRAME_TYPE_AVI + hdmi_info.bits.header.length + hdmi_info.bits.header.version;
- for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
+ for (byte_index = 1; byte_index <= hdmi_info.bits.header.length; byte_index++)
*check_sum += hdmi_info.packet_raw_data.sb[byte_index];
/* one byte complement */
@@ -2425,21 +2420,6 @@ static void set_spd_info_packet(
*info_packet = stream->vrr_infopacket;
}
-static void set_dp_sdp_info_packet(
- struct dc_info_packet *info_packet,
- struct dc_stream_state *stream)
-{
- /* SPD info packet for custom sdp message */
-
- /* Return if false. If true,
- * set the corresponding bit in the info packet
- */
- if (!stream->dpsdp_infopacket.valid)
- return;
-
- *info_packet = stream->dpsdp_infopacket;
-}
-
static void set_hdr_static_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
@@ -2495,7 +2475,6 @@ void dc_resource_state_copy_construct(
if (cur_pipe->bottom_pipe)
cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
}
for (i = 0; i < dst_ctx->stream_count; i++) {
@@ -2536,7 +2515,6 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->spd.valid = false;
info->hdrsmd.valid = false;
info->vsc.valid = false;
- info->dpsdp.valid = false;
signal = pipe_ctx->stream->signal;
@@ -2556,8 +2534,6 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
-
- set_dp_sdp_info_packet(&info->dpsdp, pipe_ctx->stream);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -2644,6 +2620,10 @@ bool pipe_need_reprogram(
if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
+ if (false == pipe_ctx_old->stream->link->link_state_valid &&
+ false == pipe_ctx_old->stream->dpms_off)
+ return true;
+
return false;
}
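The set_avi_info_frame() changes above pick the InfoFrame header version from the signalled VIC and colorimetry, and the checksum now covers whatever header length was chosen rather than the fixed baseline size. A rough, standalone sketch of that checksum rule follows; the helper name, constants and payload bytes are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the checksum rule in the hunk: seed with
 * the three header bytes (type, version, length), add every payload byte
 * after the checksum slot, then take the one-byte complement.
 */
static uint8_t demo_avi_checksum(uint8_t type, uint8_t version, uint8_t length,
				 const uint8_t *sb)
{
	uint32_t sum = type + version + length;
	unsigned int i;

	/* sb[0] holds the checksum itself and is skipped */
	for (i = 1; i <= length; i++)
		sum += sb[i];

	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	uint8_t sb[32];

	memset(sb, 0, sizeof(sb));
	sb[1] = 0x1c;	/* arbitrary example payload bytes */
	sb[2] = 0x58;
	sb[4] = 0x21;	/* made-up VIC byte */

	/* type 0x82 (AVI), version 2, length 13 for a baseline frame */
	sb[0] = demo_avi_checksum(0x82, 2, 13, sb);
	printf("checksum byte: 0x%02x\n", sb[0]);
	return 0;
}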
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 9971b515c3eb..5cbfdf1c4b11 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 96e97d25d639..af7f8be230f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
@@ -47,8 +50,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
if (dc_is_dvi_signal(stream->signal)) {
if (stream->ctx->dc->caps.dual_link_dvi &&
- (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK &&
- sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK &&
+ sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
else
stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -105,6 +108,17 @@ static void construct(struct dc_stream_state *stream,
/* EDID CAP translation for HDMI 2.0 */
stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg));
+ stream->timing.dsc_cfg.num_slices_h = 0;
+ stream->timing.dsc_cfg.num_slices_v = 0;
+ stream->timing.dsc_cfg.bits_per_pixel = 128;
+ stream->timing.dsc_cfg.block_pred_enable = 1;
+ stream->timing.dsc_cfg.linebuf_depth = 9;
+ stream->timing.dsc_cfg.version_minor = 2;
+ stream->timing.dsc_cfg.ycbcr422_simple = 0;
+#endif
+
update_stream_signal(stream, dc_sink_data);
stream->out_transfer_func = dc_create_transfer_func();
@@ -167,18 +181,19 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
{
struct dc_stream_state *new_stream;
- new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ new_stream = kmemdup(stream, sizeof(struct dc_stream_state), GFP_KERNEL);
if (!new_stream)
return NULL;
- memcpy(new_stream, stream, sizeof(struct dc_stream_state));
-
if (new_stream->sink)
dc_sink_retain(new_stream->sink);
if (new_stream->out_transfer_func)
dc_transfer_func_retain(new_stream->out_transfer_func);
+ new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
+ new_stream->ctx->dc_stream_id_count++;
+
kref_init(&new_stream->refcount);
return new_stream;
@@ -229,7 +244,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
unsigned int us_per_line;
if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
- ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
+ ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
@@ -352,53 +367,138 @@ bool dc_stream_set_cursor_position(
return true;
}
-uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+bool dc_stream_add_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_writeback_info *wb_info)
{
- uint8_t i;
- struct dc *core_dc = stream->ctx->dc;
- struct resource_context *res_ctx =
- &core_dc->current_state->res_ctx;
+ bool isDrc = false;
+ int i = 0;
+ struct dwbc *dwb;
- for (i = 0; i < MAX_PIPES; i++) {
- struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
- if (res_ctx->pipe_ctx[i].stream != stream)
- continue;
+ if (wb_info == NULL) {
+ dm_error("DC: dc_writeback_info is NULL!\n");
+ return false;
+ }
- return tg->funcs->get_frame_count(tg);
+ if (wb_info->dwb_pipe_inst >= MAX_DWB_PIPES) {
+ dm_error("DC: writeback pipe is invalid!\n");
+ return false;
}
- return 0;
+ wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ dwb->dwb_is_drc = false;
+
+ /* recalculate and apply DML parameters */
+
+ for (i = 0; i < stream->num_wb_info; i++) {
+ /*dynamic update*/
+ if (stream->writeback_info[i].wb_enabled &&
+ stream->writeback_info[i].dwb_pipe_inst == wb_info->dwb_pipe_inst) {
+ stream->writeback_info[i] = *wb_info;
+ isDrc = true;
+ }
+ }
+
+ if (!isDrc) {
+ stream->writeback_info[stream->num_wb_info++] = *wb_info;
+ }
+
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ /* enable writeback */
+ if (dc->hwss.enable_writeback) {
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream);
+ struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, stream_status, wb_info);
+ } else {
+ /* Enable writeback pipe from scratch */
+ dc->hwss.enable_writeback(dc, stream_status, wb_info);
+ }
+ }
+
+ return true;
}
-static void build_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx,
- const uint8_t *custom_sdp_message,
- unsigned int sdp_message_size)
+bool dc_stream_remove_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t dwb_pipe_inst)
{
- uint8_t i;
- struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+ int i = 0, j = 0;
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
- /* set valid info */
- info->dpsdp.valid = true;
+ if (dwb_pipe_inst >= MAX_DWB_PIPES) {
+ dm_error("DC: writeback pipe is invalid!\n");
+ return false;
+ }
- /* set sdp message header */
- info->dpsdp.hb0 = custom_sdp_message[0]; /* package id */
- info->dpsdp.hb1 = custom_sdp_message[1]; /* package type */
- info->dpsdp.hb2 = custom_sdp_message[2]; /* package specific byte 0 any data */
- info->dpsdp.hb3 = custom_sdp_message[3]; /* package specific byte 0 any data */
+// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
+ for (i = 0; i < stream->num_wb_info; i++) {
+ /*dynamic update*/
+ if (stream->writeback_info[i].wb_enabled &&
+ stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) {
+ stream->writeback_info[i].wb_enabled = false;
+ }
+ }
- /* set sdp message data */
- for (i = 0; i < 32; i++)
- info->dpsdp.sb[i] = (custom_sdp_message[i+4]);
+ /* remove writeback info for disabled writeback pipes from stream */
+ for (i = 0, j = 0; i < stream->num_wb_info; i++) {
+ if (stream->writeback_info[i].wb_enabled) {
+ if (i != j)
+ /* trim the array */
+ stream->writeback_info[j] = stream->writeback_info[i];
+ j++;
+ }
+ }
+ stream->num_wb_info = j;
+ /* recalculate and apply DML parameters */
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ /* disable writeback */
+ if (dc->hwss.disable_writeback)
+ dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+
+ return true;
}
+#endif
-static void invalid_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx)
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
{
- struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+ uint8_t i;
+ struct dc *core_dc = stream->ctx->dc;
+ struct resource_context *res_ctx =
+ &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+ if (res_ctx->pipe_ctx[i].stream != stream)
+ continue;
- /* in-valid info */
- info->dpsdp.valid = false;
+ return tg->funcs->get_frame_count(tg);
+ }
+
+ return 0;
}
bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
@@ -406,7 +506,7 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
unsigned int sdp_message_size)
{
int i;
- struct dc *core_dc;
+ struct dc *dc;
struct resource_context *res_ctx;
if (stream == NULL) {
@@ -414,8 +514,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
return false;
}
- core_dc = stream->ctx->dc;
- res_ctx = &core_dc->current_state->res_ctx;
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -423,11 +523,14 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
if (pipe_ctx->stream != stream)
continue;
- build_dp_sdp_info_frame(pipe_ctx, custom_sdp_message, sdp_message_size);
-
- core_dc->hwss.update_info_frame(pipe_ctx);
+ if (dc->hwss.send_immediate_sdp_message != NULL)
+ dc->hwss.send_immediate_sdp_message(pipe_ctx,
+ custom_sdp_message,
+ sdp_message_size);
+ else
+ DC_LOG_WARNING("%s:send_immediate_sdp_message not implemented on this ASIC\n",
+ __func__);
- invalid_dp_sdp_info_frame(pipe_ctx);
}
return true;
@@ -463,6 +566,77 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
return ret;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool status = true;
+ struct pipe_ctx *pipe = NULL;
+ int i;
+
+ if (!dc->hwss.dmdata_status_done)
+ return false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream)
+ break;
+ }
+ /* Stream not found, by default we'll assume HUBP fetched dm data */
+ if (i == MAX_PIPES)
+ return true;
+
+ status = dc->hwss.dmdata_status_done(pipe);
+ return status;
+}
+
+bool dc_stream_set_dynamic_metadata(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_dmdata_attributes *attr)
+{
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct hubp *hubp;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream == stream)
+ break;
+ }
+
+ if (i == MAX_PIPES)
+ return false;
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp == NULL)
+ return false;
+
+ pipe_ctx->stream->dmdata_address = attr->address;
+
+ if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
+ if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
+ /* if using dynamic meta, don't set up generic infopackets */
+ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
+ pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
+ pipe_ctx->stream_res.stream_enc,
+ true, pipe_ctx->plane_res.hubp->inst,
+ dc_is_dp_signal(pipe_ctx->stream->signal) ?
+ dmdata_dp : dmdata_hdmi);
+ } else
+ pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
+ pipe_ctx->stream_res.stream_enc,
+ false, pipe_ctx->plane_res.hubp->inst,
+ dc_is_dp_signal(pipe_ctx->stream->signal) ?
+ dmdata_dp : dmdata_hdmi);
+ }
+
+ if (hubp->funcs->dmdata_set_attributes != NULL &&
+ pipe_ctx->stream->dmdata_address.quad_part != 0) {
+ hubp->funcs->dmdata_set_attributes(hubp, attr);
+ }
+
+ return true;
+}
+#endif
void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
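dc_stream_remove_writeback() above first marks the matching entry disabled and then trims the stream's writeback_info array so only enabled entries remain, packed to the front. A standalone sketch of that compaction, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-stream writeback bookkeeping. */
struct demo_wb_info {
	bool wb_enabled;
	int dwb_pipe_inst;
};

/* Mirrors the trim loop in the hunk: disabled entries are dropped and the
 * remaining ones are moved to the front of the array.
 */
static int demo_compact_wb_info(struct demo_wb_info *info, int count)
{
	int i, j = 0;

	for (i = 0; i < count; i++) {
		if (info[i].wb_enabled) {
			if (i != j)
				info[j] = info[i];
			j++;
		}
	}
	return j;	/* new num_wb_info */
}

int main(void)
{
	struct demo_wb_info info[3] = {
		{ true, 0 }, { false, 1 }, { true, 2 },
	};
	int num = demo_compact_wb_info(info, 3);

	printf("%d writeback entries remain, last pipe inst %d\n",
	       num, info[num - 1].dwb_pipe_inst);
	return 0;
}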
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index a5e86f9b148f..f40e4fd52fa2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/mm.h>
+
/* DC interface (public) */
#include "dm_services.h"
#include "dc.h"
@@ -48,6 +50,25 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
plane_state->in_transfer_func->ctx = ctx;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ plane_state->in_shaper_func = dc_create_transfer_func();
+ if (plane_state->in_shaper_func != NULL) {
+ plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
+ plane_state->in_shaper_func->ctx = ctx;
+ }
+
+ plane_state->lut3d_func = dc_create_3dlut_func();
+ if (plane_state->lut3d_func != NULL) {
+ plane_state->lut3d_func->ctx = ctx;
+ plane_state->lut3d_func->initialized = false;
+ }
+ plane_state->blend_tf = dc_create_transfer_func();
+ if (plane_state->blend_tf != NULL) {
+ plane_state->blend_tf->type = TF_TYPE_BYPASS;
+ plane_state->blend_tf->ctx = ctx;
+ }
+
+#endif
}
static void destruct(struct dc_plane_state *plane_state)
@@ -60,6 +81,24 @@ static void destruct(struct dc_plane_state *plane_state)
plane_state->in_transfer_func);
plane_state->in_transfer_func = NULL;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (plane_state->in_shaper_func != NULL) {
+ dc_transfer_func_release(
+ plane_state->in_shaper_func);
+ plane_state->in_shaper_func = NULL;
+ }
+ if (plane_state->lut3d_func != NULL) {
+ dc_3dlut_func_release(
+ plane_state->lut3d_func);
+ plane_state->lut3d_func = NULL;
+ }
+ if (plane_state->blend_tf != NULL) {
+ dc_transfer_func_release(
+ plane_state->blend_tf);
+ plane_state->blend_tf = NULL;
+ }
+
+#endif
}
/*******************************************************************************
@@ -224,4 +263,40 @@ alloc_fail:
return NULL;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+static void dc_3dlut_func_free(struct kref *kref)
+{
+ struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount);
+
+ kvfree(lut);
+}
+
+struct dc_3dlut *dc_create_3dlut_func(void)
+{
+ struct dc_3dlut *lut = kvzalloc(sizeof(*lut), GFP_KERNEL);
+
+ if (lut == NULL)
+ goto alloc_fail;
+
+ kref_init(&lut->refcount);
+ lut->initialized = false;
+
+ return lut;
+
+alloc_fail:
+ return NULL;
+
+}
+
+void dc_3dlut_func_release(struct dc_3dlut *lut)
+{
+ kref_put(&lut->refcount, dc_3dlut_func_free);
+}
+
+void dc_3dlut_func_retain(struct dc_3dlut *lut)
+{
+ kref_get(&lut->refcount);
+}
+#endif
+
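The plane-state additions above give the shaper, 3D LUT and blend transfer functions the same reference-counted lifetime as the existing in_transfer_func: created with one reference, retained per additional user, freed when the last reference is put. A rough userspace model of that pattern follows, with a plain counter and malloc standing in for kref and kvzalloc.

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the kref-based lifetime used by
 * dc_create_3dlut_func() / dc_3dlut_func_retain() / dc_3dlut_func_release().
 */
struct demo_3dlut {
	int refcount;
	int initialized;
};

static struct demo_3dlut *demo_create_3dlut(void)
{
	struct demo_3dlut *lut = calloc(1, sizeof(*lut));

	if (!lut)
		return NULL;
	lut->refcount = 1;	/* kref_init() starts at one reference */
	return lut;
}

static void demo_retain(struct demo_3dlut *lut)
{
	lut->refcount++;
}

static void demo_release(struct demo_3dlut *lut)
{
	if (--lut->refcount == 0) {
		free(lut);	/* last reference frees the object */
		printf("lut freed\n");
	}
}

int main(void)
{
	struct demo_3dlut *lut = demo_create_3dlut();

	if (!lut)
		return 1;
	demo_retain(lut);	/* a second user takes a reference */
	demo_release(lut);	/* first user done, object survives */
	demo_release(lut);	/* last reference dropped, object freed */
	return 0;
}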
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index 6ce87b682a32..a96d8de9380e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -24,8 +24,9 @@
*/
#include "vm_helper.h"
+#include "dc.h"
-static void mark_vmid_used(struct vm_helper *vm_helper, unsigned int pos, uint8_t hubp_idx)
+void vm_helper_mark_vmid_used(struct vm_helper *vm_helper, unsigned int pos, uint8_t hubp_idx)
{
struct vmid_usage vmids = vm_helper->hubp_vmid_usage[hubp_idx];
@@ -33,91 +34,43 @@ static void mark_vmid_used(struct vm_helper *vm_helper, unsigned int pos, uint8_
vmids.vmid_usage[1] = 1 << pos;
}
-static void add_ptb_to_table(struct vm_helper *vm_helper, unsigned int vmid, uint64_t ptb)
+int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
- vm_helper->ptb_assigned_to_vmid[vmid] = ptb;
- vm_helper->num_vmids_available--;
-}
-
-static void clear_entry_from_vmid_table(struct vm_helper *vm_helper, unsigned int vmid)
-{
- vm_helper->ptb_assigned_to_vmid[vmid] = 0;
- vm_helper->num_vmids_available++;
-}
-
-static void evict_vmids(struct vm_helper *vm_helper)
-{
- int i;
- uint16_t ord = 0;
+ int num_vmids = 0;
- for (i = 0; i < vm_helper->num_vmid; i++)
- ord |= vm_helper->hubp_vmid_usage[i].vmid_usage[0] | vm_helper->hubp_vmid_usage[i].vmid_usage[1];
+ /* Call HWSS to setup HUBBUB for address config */
+ if (dc->hwss.init_sys_ctx) {
+ num_vmids = dc->hwss.init_sys_ctx(dc->hwseq, dc, pa_config);
- // At this point any positions with value 0 are unused vmids, evict them
- for (i = 1; i < vm_helper->num_vmid; i++) {
- if (ord & (1u << i))
- clear_entry_from_vmid_table(vm_helper, i);
+ /* Pre-init system aperture start/end for all HUBP instances (if not gating?)
+ * or cache system aperture if using power gating
+ */
+ memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
+ dc->vm_pa_config.valid = true;
}
-}
-
-// Return value of -1 indicates vmid table unitialized or ptb dne in the table
-static int get_existing_vmid_for_ptb(struct vm_helper *vm_helper, uint64_t ptb)
-{
- int i;
- for (i = 0; i < vm_helper->num_vmid; i++) {
- if (vm_helper->ptb_assigned_to_vmid[i] == ptb)
- return i;
- }
-
- return -1;
+ return num_vmids;
}
-// Expected to be called only when there's an available vmid
-static int get_next_available_vmid(struct vm_helper *vm_helper)
+void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid)
{
- int i;
-
- for (i = 1; i < vm_helper->num_vmid; i++) {
- if (vm_helper->ptb_assigned_to_vmid[i] == 0)
- return i;
- }
-
- return -1;
+ dc->hwss.init_vm_ctx(dc->hwseq, dc, va_config, vmid);
}
-uint8_t get_vmid_for_ptb(struct vm_helper *vm_helper, int64_t ptb, uint8_t hubp_idx)
+int dc_get_vmid_use_vector(struct dc *dc)
{
- unsigned int vmid = 0;
- int vmid_exists = -1;
-
- // Physical address gets vmid 0
- if (ptb == 0)
- return 0;
-
- vmid_exists = get_existing_vmid_for_ptb(vm_helper, ptb);
-
- if (vmid_exists != -1) {
- mark_vmid_used(vm_helper, vmid_exists, hubp_idx);
- vmid = vmid_exists;
- } else {
- if (vm_helper->num_vmids_available == 0)
- evict_vmids(vm_helper);
-
- vmid = get_next_available_vmid(vm_helper);
- mark_vmid_used(vm_helper, vmid, hubp_idx);
- add_ptb_to_table(vm_helper, vmid, ptb);
- }
+ int i;
+ int in_use = 0;
- return vmid;
+ for (i = 0; i < dc->vm_helper->num_vmid; i++)
+ in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0]
+ | dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1];
+ return in_use;
}
-void init_vm_helper(struct vm_helper *vm_helper, unsigned int num_vmid, unsigned int num_hubp)
+void vm_helper_init(struct vm_helper *vm_helper, unsigned int num_vmid)
{
vm_helper->num_vmid = num_vmid;
- vm_helper->num_hubp = num_hubp;
- vm_helper->num_vmids_available = num_vmid - 1;
memset(vm_helper->hubp_vmid_usage, 0, sizeof(vm_helper->hubp_vmid_usage[0]) * MAX_HUBP);
- memset(vm_helper->ptb_assigned_to_vmid, 0, sizeof(vm_helper->ptb_assigned_to_vmid[0]) * MAX_VMID);
}