From patchwork Mon Nov 22 18:06:31 2021
X-Patchwork-Submitter: Timo Aaltonen
X-Patchwork-Id: 1558179
From: Timo Aaltonen
To: kernel-team@lists.ubuntu.com
Subject: [PATCH 29/30] drm/amd/display: Deadlock/HPD Status/Crash Bug Fix
Date: Mon, 22 Nov 2021 20:06:31 +0200
Message-Id: <20211122180632.72792-33-tjaalton@ubuntu.com>
In-Reply-To: <20211122180632.72792-1-tjaalton@ubuntu.com>
References: <20211122180632.72792-1-tjaalton@ubuntu.com>

From: Jude Shih

BugLink: https://bugs.launchpad.net/bugs/1951868

[Why]
1. The HPD callback function has a deadlock problem.
2. HPD status is not assigned.
3. A NULL pointer dereference causes a crash.
4. link_enc is NULL in the DPIA case.

[How]
1. Fix the deadlock by moving the HPD handling out of the drm_modeset_lock.
2. Assign HPD status from the outbox notify coming from the DMUB firmware.
3. Fix the crash by checking that the DDC pin or link encoder exists.
4. Use link_enc_cfg_get_link_enc_used_by_link to assign the link encoder dynamically.
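As a minimal, self-contained sketch of the deferral pattern behind [How] item 1 (plain C, with a pthread mutex standing in for dev->mode_config.connection_mutex and illustrative names; this is not code from the patch):

/*
 * Illustrative only -- not from the patch.  handle_hpd() stands in for
 * handle_hpd_irq_helper(), which re-acquires locks of its own, so it
 * must not be called while the iteration lock is still held.
 */
#include <pthread.h>
#include <stdio.h>

struct connector {
	int link_index;
};

static pthread_mutex_t connection_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct connector connectors[] = { { 0 }, { 1 }, { 2 } };

/* The handler takes the lock again; calling it with the lock already
 * held would deadlock. */
static void handle_hpd(struct connector *c)
{
	pthread_mutex_lock(&connection_mutex);
	printf("HPD handled for link %d\n", c->link_index);
	pthread_mutex_unlock(&connection_mutex);
}

static void hpd_callback(int link_index)
{
	struct connector *hpd_connector = NULL;
	size_t i;

	pthread_mutex_lock(&connection_mutex);
	for (i = 0; i < sizeof(connectors) / sizeof(connectors[0]); i++) {
		if (connectors[i].link_index == link_index) {
			hpd_connector = &connectors[i];	/* remember it, do not call yet */
			break;
		}
	}
	pthread_mutex_unlock(&connection_mutex);

	if (hpd_connector)
		handle_hpd(hpd_connector);	/* safe: lock already released */
}

int main(void)
{
	hpd_callback(1);
	return 0;
}

The driver-side motivation is visible in the diff below: handle_hpd_irq_helper() calls drm_modeset_lock_all(), so dmub_hpd_callback() now only records the matching connector under connection_mutex and invokes the helper after drm_modeset_unlock().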
Reviewed-by: Nicholas Kazlauskas
Acked-by: Wayne Lin
Acked-by: Harry Wentland
Signed-off-by: Jude Shih
Signed-off-by: Alex Deucher
(backported from commit f6e03f80eb1f4ef134845c69729a72e706184bc2 - fix conflicts)
Signed-off-by: Timo Aaltonen
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 70 ++++++++++++-------
 1 file changed, 44 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7cf1b1e98885..6c9cd888a34b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -29,6 +29,7 @@
 #include "dm_services_types.h"
 #include "dc.h"
 #include "dc_link_dp.h"
+#include "link_enc_cfg.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
 #include "dmub/dmub_srv.h"
@@ -648,6 +649,7 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific
 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
 {
 	struct amdgpu_dm_connector *aconnector;
+	struct amdgpu_dm_connector *hpd_aconnector = NULL;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	struct dc_link *link;
@@ -678,13 +680,15 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (link && aconnector->dc_link == link) {
 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
-			handle_hpd_irq_helper(aconnector);
+			hpd_aconnector = aconnector;
 			break;
 		}
 	}
 	drm_connector_list_iter_end(&iter);
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
+	if (hpd_aconnector)
+		handle_hpd_irq_helper(hpd_aconnector);
 }
 
 /**
@@ -747,8 +751,10 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 	struct dmcub_trace_buf_entry entry = { 0 };
 	uint32_t count = 0;
 	struct dmub_hpd_work *dmub_hpd_wrk;
+	struct dc_link *plink = NULL;
 
-	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+	if (dc_enable_dmub_notifications(adev->dm.dc) &&
+		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
 		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
 		if (!dmub_hpd_wrk) {
 			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
@@ -756,27 +762,28 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 		}
 		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
 
-		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
-			do {
-				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
-				if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
-					DRM_ERROR("DM: notify type %d larger than the array size %ld !", notify.type,
-					ARRAY_SIZE(dm->dmub_thread_offload));
-					continue;
-				}
-				if (dm->dmub_thread_offload[notify.type] == true) {
-					dmub_hpd_wrk->dmub_notify = &notify;
-					dmub_hpd_wrk->adev = adev;
-					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
-				} else {
-					dm->dmub_callback[notify.type](adev, &notify);
+		do {
+			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+			if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+				DRM_ERROR("DM: notify type %d invalid!", notify.type);
+				continue;
+			}
+			if (dm->dmub_thread_offload[notify.type] == true) {
+				dmub_hpd_wrk->dmub_notify = &notify;
+				dmub_hpd_wrk->adev = adev;
+				if (notify.type == DMUB_NOTIFICATION_HPD) {
+					plink = adev->dm.dc->links[notify.link_index];
+					if (plink) {
+						plink->hpd_status =
+							notify.hpd_status ==
+							DP_HPD_PLUG ? true : false;
+					}
 				}
-
-			} while (notify.pending_notification);
-
-		} else {
-			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
-		}
+				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+			} else {
+				dm->dmub_callback[notify.type](adev, &notify);
+			}
+		} while (notify.pending_notification);
 	}
 
 	do {
@@ -794,7 +801,8 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 
 	} while (count <= DMUB_TRACE_MAX_READ);
 
-	ASSERT(count <= DMUB_TRACE_MAX_READ);
+	if (count > DMUB_TRACE_MAX_READ)
+		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
 }
 #endif
 
@@ -2796,7 +2804,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 
 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
 		emulated_link_detect(aconnector->dc_link);
-
 		drm_modeset_lock_all(dev);
 		dm_restore_drm_connector_state(dev, connector);
 		drm_modeset_unlock_all(dev);
@@ -7910,7 +7917,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
-		aconnector->base.ycbcr_420_allowed =
+		if (link->is_dig_mapping_flexible &&
+		    link->dc->res_pool->funcs->link_encs_assign) {
+			link->link_enc =
+				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+			if (!link->link_enc)
+				link->link_enc =
+					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+		}
+
+		if (link->link_enc)
+			aconnector->base.ycbcr_420_allowed =
 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
 		break;
 	case DRM_MODE_CONNECTOR_DVID:
@@ -8025,7 +8042,8 @@ create_i2c(struct ddc_service *ddc_service,
 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
 	i2c_set_adapdata(&i2c->base, i2c);
 	i2c->ddc_service = ddc_service;
-	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
+	if (i2c->ddc_service->ddc_pin)
+		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
 
 	return i2c;
 }
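For reference, the two NULL guards from [Why] items 3 and 4 reduce to the stand-alone sketch below. The types and the helpers enc_used_by_link() and next_avail_enc() are simplified stand-ins for the driver's link_enc_cfg_get_link_enc_used_by_link() and link_enc_cfg_get_next_avail_link_enc(); this is an illustration, not the driver code.

/* Illustrative only -- simplified stand-ins for the driver structures. */
#include <stdio.h>

struct link_encoder { int id; };
struct ddc_pin { int ddc_channel; };

struct dc_link {
	struct link_encoder *link_enc;	/* may be NULL until assigned */
	struct ddc_pin *ddc_pin;	/* NULL when there is no physical DDC pin */
};

static struct link_encoder spare_enc = { .id = 7 };

static struct link_encoder *enc_used_by_link(struct dc_link *link)
{
	return link->link_enc;		/* often NULL in the DPIA case */
}

static struct link_encoder *next_avail_enc(void)
{
	return &spare_enc;
}

static void init_link(struct dc_link *link, int link_index)
{
	/* Fix 4: assign the encoder dynamically, with a fallback. */
	link->link_enc = enc_used_by_link(link);
	if (!link->link_enc)
		link->link_enc = next_avail_enc();

	/* Fix 3: guard the optional pointer instead of dereferencing NULL. */
	if (link->ddc_pin)
		link->ddc_pin->ddc_channel = link_index;
}

int main(void)
{
	struct dc_link dpia_link = { NULL, NULL };

	init_link(&dpia_link, 0);
	printf("link encoder id: %d\n", dpia_link.link_enc->id);
	return 0;
}

A link can reach amdgpu_dm_connector_init_helper() and create_i2c() with no encoder mapped and no DDC pin, which is why both sites in the patch now check the pointers before dereferencing them.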