Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
5 files changed, 246 insertions, 157 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ff0a217b9d56..e5554a36e8c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -424,12 +424,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
-	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
-						 amdgpu_crtc->pflip_status,
-						 AMDGPU_FLIP_SUBMITTED,
-						 amdgpu_crtc->crtc_id,
-						 amdgpu_crtc);
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+			     amdgpu_crtc->pflip_status,
+			     AMDGPU_FLIP_SUBMITTED,
+			     amdgpu_crtc->crtc_id,
+			     amdgpu_crtc);
 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return;
 	}
@@ -883,7 +883,7 @@ static int dm_set_powergating_state(void *handle,
 }
 
 /* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
 
 /* Allocate memory for FBC compressed data  */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1282,7 +1282,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
 
-	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
@@ -1347,6 +1347,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 	if (amdgpu_in_reset(adev))
 		goto skip;
 
+	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+		goto skip;
+	}
+
 	mutex_lock(&adev->dm.dc_lock);
 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
 		dc_link_dp_handle_automated_test(dc_link);
@@ -1365,8 +1374,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 		DP_TEST_RESPONSE,
 		&test_response.raw,
 		sizeof(test_response));
-	}
-	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
 			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
 		/* offload_work->data is from handle_hpd_rx_irq->
@@ -1554,7 +1562,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	mutex_init(&adev->dm.dc_lock);
 	mutex_init(&adev->dm.audio_lock);
 
-	if(amdgpu_dm_irq_init(adev)) {
+	if (amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
 		goto error;
 	}
@@ -1630,9 +1638,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		}
 		break;
 	}
-	if (init_data.flags.gpu_vm_support &&
-	    (amdgpu_sg_display == 0))
-		init_data.flags.gpu_vm_support = false;
+	if (init_data.flags.gpu_vm_support)
+		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
 
 	if (init_data.flags.gpu_vm_support)
 		adev->mode_info.gpu_vm_support = true;
@@ -1696,9 +1703,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
 		adev->dm.dc->debug.disable_stutter = true;
 
-	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
 		adev->dm.dc->debug.disable_dsc = true;
-	}
 
 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
 		adev->dm.dc->debug.disable_clock_gate = true;
@@ -1942,8 +1948,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	mutex_destroy(&adev->dm.audio_lock);
 	mutex_destroy(&adev->dm.dc_lock);
 	mutex_destroy(&adev->dm.dpia_aux_lock);
-
-	return;
 }
 
 static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1952,7 +1956,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 	int r;
 	const struct dmcu_firmware_header_v1_0 *hdr;
 
-	switch(adev->asic_type) {
+	switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
@@ -2709,7 +2713,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
 		struct dc_stream_update stream_update;
-	} * bundle;
+	} *bundle;
 	int k, m;
 
 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2739,8 +2743,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 
 cleanup:
 	kfree(bundle);
-
-	return;
 }
 
 static int dm_resume(void *handle)
@@ -2954,8 +2956,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
 	.set_powergating_state = dm_set_powergating_state,
 };
 
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_DCE,
 	.major = 1,
 	.minor = 0,
@@ -3000,9 +3001,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
 	caps->aux_support = false;
 
-	if (caps->ext_caps->bits.oled == 1 /*||
-	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+	if (caps->ext_caps->bits.oled == 1
+	    /*
+	     * ||
+	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+	     * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+	     */)
 		caps->aux_support = true;
 
 	if (amdgpu_backlight == 0)
@@ -3236,86 +3240,6 @@ static void handle_hpd_irq(void *param)
 
 }
 
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
-	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-	u8 dret;
-	bool new_irq_handled = false;
-	int dpcd_addr;
-	int dpcd_bytes_to_read;
-
-	const int max_process_count = 30;
-	int process_count = 0;
-
-	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
-	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
-		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
-		/* DPCD 0x200 - 0x201 for downstream IRQ */
-		dpcd_addr = DP_SINK_COUNT;
-	} else {
-		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
-		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
-		dpcd_addr = DP_SINK_COUNT_ESI;
-	}
-
-	dret = drm_dp_dpcd_read(
-		&aconnector->dm_dp_aux.aux,
-		dpcd_addr,
-		esi,
-		dpcd_bytes_to_read);
-
-	while (dret == dpcd_bytes_to_read &&
-		process_count < max_process_count) {
-		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
-		u8 retry;
-		dret = 0;
-
-		process_count++;
-
-		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
-		/* handle HPD short pulse irq */
-		if (aconnector->mst_mgr.mst_state)
-			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
-							esi,
-							ack,
-							&new_irq_handled);
-
-		if (new_irq_handled) {
-			/* ACK at DPCD to notify down stream */
-			for (retry = 0; retry < 3; retry++) {
-				ssize_t wret;
-
-				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
-							  dpcd_addr + 1,
-							  ack[1]);
-				if (wret == 1)
-					break;
-			}
-
-			if (retry == 3) {
-				DRM_ERROR("Failed to ack MST event.\n");
-				return;
-			}
-
-			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
-			/* check if there is new irq to be handled */
-			dret = drm_dp_dpcd_read(
-				&aconnector->dm_dp_aux.aux,
-				dpcd_addr,
-				esi,
-				dpcd_bytes_to_read);
-
-			new_irq_handled = false;
-		} else {
-			break;
-		}
-	}
-
-	if (process_count == max_process_count)
-		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
 							union hpd_irq_data hpd_irq_data)
 {
@@ -3377,7 +3301,23 @@ static void handle_hpd_rx_irq(void *param)
 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-			dm_handle_mst_sideband_msg(aconnector);
+			bool skip = false;
+
+			/*
+			 * DOWN_REP_MSG_RDY is also handled by polling method
+			 * mgr->cbs->poll_hpd_irq()
+			 */
+			spin_lock(&offload_wq->offload_lock);
+			skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+			if (!skip)
+				offload_wq->is_handling_mst_msg_rdy_event = true;
+
+			spin_unlock(&offload_wq->offload_lock);
+
+			if (!skip)
+				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
 			goto out;
 		}
@@ -3468,7 +3408,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 		aconnector = to_amdgpu_dm_connector(connector);
 		dc_link = aconnector->dc_link;
 
-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 			int_params.irq_source = dc_link->irq_source_hpd;
 
@@ -3477,7 +3417,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 					(void *) aconnector);
 		}
 
-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
 			/* Also register for DP short pulse (hpd_rx). */
 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 
@@ -3486,11 +3426,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 					handle_hpd_rx_irq,
 					(void *) aconnector);
-
-			if (adev->dm.hpd_rx_offload_wq)
-				adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
-					aconnector;
 		}
+
+		if (adev->dm.hpd_rx_offload_wq)
+			adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+				aconnector;
 	}
 }
@@ -3503,7 +3443,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 	struct dc_interrupt_params int_params = {0};
 	int r;
 	int i;
-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3517,11 +3457,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 	 *    coming from DC hardware.
 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-	 *    for acknowledging and handling. */
+	 *    for acknowledging and handling.
+	 */
 
 	/* Use VBLANK interrupt */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
 		if (r) {
 			DRM_ERROR("Failed to add crtc irq id!\n");
 			return r;
@@ -3529,7 +3470,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 
 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 		int_params.irq_source =
-			dc_interrupt_to_irq_source(dc, i+1 , 0);
+			dc_interrupt_to_irq_source(dc, i + 1, 0);
 
 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
@@ -3585,7 +3526,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	struct dc_interrupt_params int_params = {0};
 	int r;
 	int i;
-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
 	if (adev->family >= AMDGPU_FAMILY_AI)
 		client_id = SOC15_IH_CLIENTID_DCE;
@@ -3602,7 +3543,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 	 *    coming from DC hardware.
 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-	 *    for acknowledging and handling. */
+	 *    for acknowledging and handling.
+	 */
 
 	/* Use VBLANK interrupt */
 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@@ -4049,7 +3991,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
 }
 
 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
-				unsigned *min, unsigned *max)
+				unsigned int *min, unsigned int *max)
 {
 	if (!caps)
 		return 0;
@@ -4069,7 +4011,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
 					uint32_t brightness)
 {
-	unsigned min, max;
+	unsigned int min, max;
 
 	if (!get_brightness_range(caps, &min, &max))
 		return brightness;
@@ -4082,7 +4024,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
 				      uint32_t brightness)
 {
-	unsigned min, max;
+	unsigned int min, max;
 
 	if (!get_brightness_range(caps, &min, &max))
 		return brightness;
@@ -4562,7 +4504,6 @@ fail:
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
 	drm_atomic_private_obj_fini(&dm->atomic_obj);
-	return;
 }
 
 /******************************************************************************
@@ -5394,6 +5335,7 @@ static bool adjust_colour_depth_from_display_info(
 {
 	enum dc_color_depth depth = timing_out->display_color_depth;
 	int normalized_clk;
+
 	do {
 		normalized_clk = timing_out->pix_clk_100hz / 10;
 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5609,6 +5551,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
 {
 	struct dc_sink_init_data sink_init_data = { 0 };
 	struct dc_sink *sink = NULL;
+
 	sink_init_data.link = aconnector->dc_link;
 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
@@ -5732,7 +5675,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 		return &aconnector->freesync_vid_base;
 
 	/* Find the preferred mode */
-	list_for_each_entry (m, list_head, head) {
+	list_for_each_entry(m, list_head, head) {
 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
 			m_pref = m;
 			break;
@@ -5756,7 +5699,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 	 * For some monitors, preferred mode is not the mode with highest
 	 * supported refresh rate.
 	 */
-	list_for_each_entry (m, list_head, head) {
+	list_for_each_entry(m, list_head, head) {
 		current_refresh  = drm_mode_vrefresh(m);
 
 		if (m->hdisplay == m_pref->hdisplay &&
@@ -6028,7 +5971,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 * This may not be an error, the use case is when we have no
 		 * usermode calls to reset and set mode upon hotplug. In this
 		 * case, we call set mode ourselves to restore the previous mode
-		 * and the modelist may not be filled in in time.
+		 * and the modelist may not be filled in time.
 		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
 	} else {
@@ -6051,9 +5994,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		drm_mode_set_crtcinfo(&mode, 0);
 
 	/*
-	* If scaling is enabled and refresh rate didn't change
-	* we copy the vic and polarities of the old timings
-	*/
+	 * If scaling is enabled and refresh rate didn't change
+	 * we copy the vic and polarities of the old timings
+	 */
 	if (!scale || mode_refresh != preferred_refresh)
 		fill_stream_properties_from_drm_display_mode(
 			stream, &mode, &aconnector->base, con_state, NULL,
@@ -6817,6 +6760,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 
 	if (!state->duplicated) {
 		int max_bpc = conn_state->max_requested_bpc;
+
 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
 			  aconnector->force_yuv420_output;
 		color_depth = convert_color_depth_from_display_info(connector,
@@ -7135,7 +7079,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
 {
 	struct drm_display_mode *m;
 
-	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
 		if (drm_mode_equal(m, mode))
 			return true;
 	}
@@ -7295,6 +7239,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
 	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
 	mutex_init(&aconnector->hpd_lock);
+	mutex_init(&aconnector->handle_mst_msg_ready);
 
 	/*
 	 * configure support HPD hot plug connector_>polled default value is 0
@@ -7454,7 +7399,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
 	link->priv = aconnector;
 
-	DRM_DEBUG_DRIVER("%s()\n", __func__);
 
 	i2c = create_i2c(link->ddc, link->link_index, &res);
 	if (!i2c) {
@@ -8125,7 +8069,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * Only allow immediate flips for fast updates that don't
 		 * change memory domain, FB pitch, DCC state, rotation or
 		 * mirroring.
+		 *
+		 * dm_crtc_helper_atomic_check() only accepts async flips with
+		 * fast updates.
 		 */
+		if (crtc->state->async_flip &&
+		    acrtc_state->update_type != UPDATE_TYPE_FAST)
+			drm_warn_once(state->dev,
+				      "[PLANE:%d:%s] async flip with non-fast update\n",
+				      plane->base.id, plane->name);
 		bundle->flip_addrs[planes_count].flip_immediate =
 			crtc->state->async_flip &&
 			acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8168,8 +8120,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 * DRI3/Present extension with defined target_msc.
 			 */
 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
-		}
-		else {
+		} else {
 			/* For variable refresh rate mode only:
 			 * Get vblank of last completed flip to avoid > 1 vrr
 			 * flips per video frame by use of throttling, but allow
@@ -8502,8 +8453,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
 	}
 
-	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
-				       new_crtc_state, i) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+				      new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8526,9 +8477,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
 		drm_dbg_state(state->dev,
-			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
-			"connectors_changed:%d\n",
+			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
 			acrtc->crtc_id,
 			new_crtc_state->enable,
 			new_crtc_state->active,
@@ -9104,8 +9053,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
 					&commit->flip_done, 10*HZ);
 
 		if (ret == 0)
-			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
-				  "timed out\n", crtc->base.id, crtc->name);
+			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+				  crtc->base.id, crtc->name);
 
 		drm_crtc_commit_put(commit);
 	}
@@ -9190,7 +9139,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 	return false;
 }
 
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
 	u64 num, den, res;
 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
@@ -9312,9 +9262,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		goto skip_modeset;
 
 	drm_dbg_state(state->dev,
-		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
-		"connectors_changed:%d\n",
+		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
 		acrtc->crtc_id,
 		new_crtc_state->enable,
 		new_crtc_state->active,
@@ -9343,8 +9291,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 						     old_crtc_state)) {
 			new_crtc_state->mode_changed = false;
 			DRM_DEBUG_DRIVER(
-				"Mode change not required for front porch change, "
-				"setting mode_changed to %d",
+				"Mode change not required for front porch change, setting mode_changed to %d",
 				new_crtc_state->mode_changed);
 
 			set_freesync_fixed_config(dm_new_crtc_state);
@@ -9356,9 +9303,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 			struct drm_display_mode *high_mode;
 
 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
-			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
 				set_freesync_fixed_config(dm_new_crtc_state);
-			}
 		}
 
 		ret = dm_atomic_get_state(state, &dm_state);
@@ -9526,6 +9472,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
 	 */
 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
 		struct amdgpu_framebuffer *old_afb, *new_afb;
+
 		if (other->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
@@ -9624,11 +9571,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
 	}
 
 	/* Core DRM takes care of checking FB modifiers, so we only need to
-	 * check tiling flags when the FB doesn't have a modifier. */
+	 * check tiling flags when the FB doesn't have a modifier.
+	 */
 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
 		if (adev->family < AMDGPU_FAMILY_AI) {
 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
-			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
 		} else {
 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@@ -9850,12 +9798,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
 	 * cursor per pipe but it's going to inherit the scaling and
 	 * positioning from the underlying pipe. Check the cursor plane's
-	 * blending properties match the underlying planes'. */
+	 * blending properties match the underlying planes'.
+	 */
 
 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-	if (!new_cursor_state || !new_cursor_state->fb) {
+	if (!new_cursor_state || !new_cursor_state->fb)
 		return 0;
-	}
 
 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@@ -9900,6 +9848,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
 	struct drm_connector_state *conn_state, *old_conn_state;
 	struct amdgpu_dm_connector *aconnector = NULL;
 	int i;
+
 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
 		if (!conn_state->crtc)
 			conn_state = old_conn_state;
@@ -10334,7 +10283,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	}
 
 	/* Store the overall update type for use later in atomic check. */
-	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 		struct dm_crtc_state *dm_new_crtc_state =
 			to_dm_crtc_state(new_crtc_state);
@@ -10356,7 +10305,7 @@ fail:
 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
 	else
-		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
 
 	trace_amdgpu_dm_atomic_check_finish(state, ret);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 4561f55afa99..9fb5bb3a75a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -195,6 +195,11 @@ struct hpd_rx_irq_offload_work_queue {
 	 */
 	bool is_handling_link_loss;
 	/**
+	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+	 * ready event when we're already handling mst message ready event
+	 */
+	bool is_handling_mst_msg_rdy_event;
+	/**
 	 * @aconnector: The aconnector that this work queue is attached to
 	 */
 	struct amdgpu_dm_connector *aconnector;
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
 	struct drm_dp_mst_port *mst_output_port;
 	struct amdgpu_dm_connector *mst_root;
 	struct drm_dp_aux *dsc_aux;
+	struct mutex handle_mst_msg_ready;
+
 	/* TODO see if we can merge with ddc_bus or make a dm_connector */
 	struct amdgpu_i2c_adapter *i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 440fc0869a34..30d4c6fd95f5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
+	/*
+	 * Only allow async flips for fast updates that don't change the FB
+	 * pitch, the DCC state, rotation, etc.
+	 */
+	if (crtc_state->async_flip &&
+	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+		drm_dbg_atomic(crtc->dev,
+			       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+			       crtc->base.id, crtc->name);
+		return -EINVAL;
+	}
+
 	/* In some use cases, like reset, no stream is attached */
 	if (!dm_crtc_state->stream)
 		return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 46d0a8f57e55..b885c39bd16b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	uint8_t dpcd_bytes_to_read;
+	const uint8_t max_process_count = 30;
+	uint8_t process_count = 0;
+	u8 retry;
+	struct amdgpu_dm_connector *aconnector =
+			container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	mutex_lock(&aconnector->handle_mst_msg_ready);
+
+	while (process_count < max_process_count) {
+		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+		process_count++;
+
+		dret = drm_dp_dpcd_read(
+			&aconnector->dm_dp_aux.aux,
+			dpcd_addr,
+			esi,
+			dpcd_bytes_to_read);
+
+		if (dret != dpcd_bytes_to_read) {
+			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+			break;
+		}
+
+		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+		switch (msg_rdy_type) {
+		case DOWN_REP_MSG_RDY_EVENT:
+			/* Only handle DOWN_REP_MSG_RDY case*/
+			esi[1] &= DP_DOWN_REP_MSG_RDY;
+			break;
+		case UP_REQ_MSG_RDY_EVENT:
+			/* Only handle UP_REQ_MSG_RDY case*/
+			esi[1] &= DP_UP_REQ_MSG_RDY;
+			break;
+		default:
+			/* Handle both cases*/
+			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+			break;
+		}
+
+		if (!esi[1])
+			break;
+
+		/* handle MST irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+						 esi,
+						 ack,
+						 &new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify down stream */
+			for (retry = 0; retry < 3; retry++) {
+				ssize_t wret;
+
+				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+							  dpcd_addr + 1,
+							  ack[1]);
+				if (wret == 1)
+					break;
+			}
+
+			if (retry == 3) {
+				DRM_ERROR("Failed to ack MST event.\n");
+				break;
+			}
+
+			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+			new_irq_handled = false;
+		} else {
+			break;
+		}
+	}
+
+	mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
 	.add_connector = dm_dp_add_mst_connector,
+	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
@@ -1210,7 +1320,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 		if (computed_streams[i])
 			continue;
 
-		if (!res_pool->funcs->remove_stream_from_ctx ||
+		if (res_pool->funcs->remove_stream_from_ctx &&
 		    res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
 			return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 1e4ede1e57ab..37c820ab0fdb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -49,6 +49,13 @@
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000
 
+enum mst_msg_ready_type {
+	NONE_MSG_RDY_EVENT = 0,
+	DOWN_REP_MSG_RDY_EVENT = 1,
+	UP_REQ_MSG_RDY_EVENT = 2,
+	DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type);
+
 struct dsc_mst_fairness_vars {
 	int pbn;
 	bool dsc_enabled;
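The HPD-RX offload path in this diff works as a claim-and-release guard: handle_hpd_rx_irq() sets is_handling_mst_msg_rdy_event under offload_wq->offload_lock before scheduling the work, dm_handle_hpd_rx_offload_work() clears it once dm_handle_mst_sideband_msg_ready_event() returns, and the new handle_mst_msg_ready mutex serializes the ESI loop itself against the poll_hpd_irq() path. A minimal userspace sketch of the claim/release part, with a C11 atomic_flag standing in for the driver's spinlock-protected bool (all names below are illustrative, not taken from the driver):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for hpd_rx_irq_offload_work_queue. */
struct offload_wq {
	atomic_flag handling;	/* plays the role of is_handling_mst_msg_rdy_event */
};

/* IRQ-side path: claim the event, or skip if a worker already owns it. */
static int try_schedule(struct offload_wq *wq)
{
	/* test_and_set returns the previous value: nonzero means already claimed */
	if (atomic_flag_test_and_set(&wq->handling))
		return 0;	/* drop this event, work is already pending */
	printf("event claimed, scheduling offload work\n");
	return 1;
}

/* Worker-side path: process the event, then release the claim. */
static void offload_work(struct offload_wq *wq)
{
	printf("handling MST message-ready event\n");
	atomic_flag_clear(&wq->handling);	/* allow the next event in */
}

int main(void)
{
	struct offload_wq wq = { .handling = ATOMIC_FLAG_INIT };

	try_schedule(&wq);	/* first event: scheduled */
	try_schedule(&wq);	/* second event while busy: skipped */
	offload_work(&wq);	/* worker finishes and clears the flag */
	try_schedule(&wq);	/* next event is accepted again */
	return 0;
}

The point of the flag is the same as in the patch: the same message-ready event can now arrive from two directions (the HPD-RX short pulse and the mgr->cbs->poll_hpd_irq() polling hook), so the IRQ path must not queue a second worker while one is already draining the ESI registers.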
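dm_handle_mst_sideband_msg_ready_event() also narrows the ESI vector before calling the MST helpers: depending on msg_rdy_type it masks esi[1] down to DP_DOWN_REP_MSG_RDY, DP_UP_REQ_MSG_RDY, or both, and the loop exits early once nothing relevant remains set. A standalone sketch of just that masking step; the two bit values are assumed to mirror the DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 definitions in include/drm/display/drm_dp.h, and the enum is the one added to amdgpu_dm_mst_types.h above:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the ESI1 bit definitions in drm_dp.h. */
#define DP_DOWN_REP_MSG_RDY	(1 << 4)
#define DP_UP_REQ_MSG_RDY	(1 << 5)

enum mst_msg_ready_type {
	NONE_MSG_RDY_EVENT = 0,
	DOWN_REP_MSG_RDY_EVENT = 1,
	UP_REQ_MSG_RDY_EVENT = 2,
	DOWN_OR_UP_MSG_RDY_EVENT = 3
};

/* Return the ESI byte masked down to the events we were asked to handle;
 * a zero result is where the driver's loop would break out early. */
static uint8_t mask_esi(uint8_t esi1, enum mst_msg_ready_type type)
{
	switch (type) {
	case DOWN_REP_MSG_RDY_EVENT:
		return esi1 & DP_DOWN_REP_MSG_RDY;
	case UP_REQ_MSG_RDY_EVENT:
		return esi1 & DP_UP_REQ_MSG_RDY;
	default:
		return esi1 & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
	}
}

int main(void)
{
	/* UP_REQ pending plus an unrelated IRQ bit set in the same byte */
	uint8_t esi1 = DP_UP_REQ_MSG_RDY | (1 << 1);

	/* poll_hpd_irq() only asks for DOWN_REP: masked result is 0 */
	printf("poll path:   %#x\n", mask_esi(esi1, DOWN_REP_MSG_RDY_EVENT));
	/* the HPD-RX offload path handles both: UP_REQ survives the mask */
	printf("hpd-rx path: %#x\n", mask_esi(esi1, DOWN_OR_UP_MSG_RDY_EVENT));
	return 0;
}

This separation is what lets the same function serve both callers: the polling callback stays restricted to down replies, while the interrupt-driven path can consume down replies and up requests in one pass.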
