From 34b4eedec8e3d85a346d1ecb5ff6b9c71604fb2e Mon Sep 17 00:00:00 2001 From: Bruce Hoo Date: Sat, 3 Dec 2022 00:00:55 +0800 Subject: [PATCH 1/6] disp: msm: sde: set in_log_limited as an unchangeable dump_mode Default mode of dump_mode in sde_dbg module is in_mem, and it changes to in_coredump when hw_recovery feature is enabled. This change allows developers to redefine in_log_limited as the default mode, and it remains unchangeable even when hw_recovery is enabled. Change-Id: I51574b3ac297f32f28ae7a69d008d1a5f443a781 Signed-off-by: Bruce Hoo --- msm/sde/sde_connector.c | 5 +++-- msm/sde_dbg.h | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/msm/sde/sde_connector.c b/msm/sde/sde_connector.c index 20f25686..4b958673 100644 --- a/msm/sde/sde_connector.c +++ b/msm/sde/sde_connector.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ @@ -3475,7 +3475,8 @@ int sde_connector_register_custom_event(struct sde_kms *kms, break; case DRM_EVENT_SDE_HW_RECOVERY: ret = _sde_conn_enable_hw_recovery(conn_drm); - sde_dbg_update_dump_mode(val); + if (SDE_DBG_DEFAULT_DUMP_MODE != SDE_DBG_DUMP_IN_LOG_LIMITED) + sde_dbg_update_dump_mode(val); break; default: break; diff --git a/msm/sde_dbg.h b/msm/sde_dbg.h index 4075836c..5a8ae24e 100644 --- a/msm/sde_dbg.h +++ b/msm/sde_dbg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
*/ @@ -543,7 +543,7 @@ void sde_rsc_debug_dump(u32 mux_sel); /** * sde_dbg_update_dump_mode - update dump mode to in_coredump mode if devcoredump - * fueature is enabled. Default dump mode is in_mem, if HW recovery feature is + * feature is enabled. Default dump mode is in_mem, if HW recovery feature is * enabled, this function will be called to set dump mode to in_coredump option. * @enable_coredump: if enable_coredump is true, update dump mode to in_coredump, * otherwise reset the dump mode to default mode. From 8588ddb23827de1d7a34183ff0351b65bc94d8cf Mon Sep 17 00:00:00 2001 From: Soutrik Mukhopadhyay Date: Tue, 7 Feb 2023 21:25:50 +0530 Subject: [PATCH 2/6] disp: msm: dp: turn off aux switch on dp cable disconnect The dp_display_disconnect_sync is disabling the aux switch during the attention hpd low processing. Ideally, the aux switch needs to be turned off only when the dp cable is disconnected. Turning the aux switch off while the cable is still connected leads to an HDCP compliance test failure. This change will turn off the aux switch only when the cable is disconnected. It reverts the commit id b6466ca7f597396cd2ecb3623d059435dfb0e4c6. Change-Id: I90cc5f31b2be1afda61f74ea4e0a44332811ead3 Signed-off-by: Soutrik Mukhopadhyay --- msm/dp/dp_display.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/msm/dp/dp_display.c b/msm/dp/dp_display.c index 17651db1..065c8ed0 100644 --- a/msm/dp/dp_display.c +++ b/msm/dp/dp_display.c @@ -1655,10 +1655,6 @@ static void dp_display_disconnect_sync(struct dp_display_private *dp) cancel_work_sync(&dp->attention_work); flush_workqueue(dp->wq); - if (!dp->debug->sim_mode && !dp->no_aux_switch - && !dp->parser->gpio_aux_switch) - dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE); - /* * Delay the teardown of the mainlink for better interop experience. 
* It is possible that certain sinks can issue an HPD high immediately @@ -1709,6 +1705,13 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev) if (dp->debug->psm_enabled && dp_display_state_is(DP_STATE_READY)) dp->link->psm_config(dp->link, &dp->panel->link_info, true); + dp->ctrl->abort(dp->ctrl, true); + dp->aux->abort(dp->aux, true); + + if (!dp->debug->sim_mode && !dp->no_aux_switch + && !dp->parser->gpio_aux_switch) + dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE); + dp_display_disconnect_sync(dp); mutex_lock(&dp->session_lock); From 4c7ea38eff40f355f3d66b39fcc5a9b7e5e51a96 Mon Sep 17 00:00:00 2001 From: Mahadevan Date: Wed, 8 Feb 2023 20:33:58 +0530 Subject: [PATCH 3/6] disp: msm: sde: qos vote for all cpus during vm transition For a proxy-scheduled VCPU like the TUI VM, assignment to a physical core is a runtime decision made by the HLOS scheduler, and it may change frequently. pm_qos vote added by PVM for specific CPUs won't be sufficient for addressing irq latency. This change updates votes for all possible CPUs during TVM entry and also removes the vote during exit. 
Change-Id: Iab5cb5f57e2389ee57689ba2ab69394376f59788 Signed-off-by: Mahadevan --- msm/sde/sde_kms.c | 123 ++++++++++++++++++++++++---------------------- 1 file changed, 63 insertions(+), 60 deletions(-) diff --git a/msm/sde/sde_kms.c b/msm/sde/sde_kms.c index 717affd8..edb09a7c 100644 --- a/msm/sde/sde_kms.c +++ b/msm/sde/sde_kms.c @@ -1079,6 +1079,62 @@ static struct drm_crtc *sde_kms_vm_get_vm_crtc( return vm_crtc; } +static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask) +{ + struct device *cpu_dev; + int cpu = 0; + u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency; + + // save irq cpu mask + sde_kms->irq_cpu_mask = *mask; + if (cpumask_empty(&sde_kms->irq_cpu_mask)) { + SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__); + return; + } + + for_each_cpu(cpu, &sde_kms->irq_cpu_mask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + SDE_DEBUG("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu])) + dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu], + cpu_irq_latency); + else + dev_pm_qos_add_request(cpu_dev, + &sde_kms->pm_qos_irq_req[cpu], + DEV_PM_QOS_RESUME_LATENCY, + cpu_irq_latency); + } +} + +static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask) +{ + struct device *cpu_dev; + int cpu = 0; + + if (cpumask_empty(mask)) { + SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__); + return; + } + + for_each_cpu(cpu, mask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + SDE_DEBUG("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu])) + dev_pm_qos_remove_request( + &sde_kms->pm_qos_irq_req[cpu]); + } +} + int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms, struct drm_atomic_state *state) { @@ -1116,6 +1172,8 @@ int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms, if 
(sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs) sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr); + _sde_kms_remove_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL); + /* enable the display path IRQ's */ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { @@ -1404,6 +1462,7 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms, if (is_primary) { + _sde_kms_update_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL); /* disable vblank events */ drm_crtc_vblank_off(crtc); @@ -4512,60 +4571,6 @@ static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable) return 0; } -static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms) -{ - struct device *cpu_dev; - int cpu = 0; - u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency; - - if (cpumask_empty(&sde_kms->irq_cpu_mask)) { - SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__); - return; - } - - for_each_cpu(cpu, &sde_kms->irq_cpu_mask) { - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - SDE_DEBUG("%s: failed to get cpu%d device\n", __func__, - cpu); - continue; - } - - if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu])) - dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu], - cpu_irq_latency); - else - dev_pm_qos_add_request(cpu_dev, - &sde_kms->pm_qos_irq_req[cpu], - DEV_PM_QOS_RESUME_LATENCY, - cpu_irq_latency); - } -} - -static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms) -{ - struct device *cpu_dev; - int cpu = 0; - - if (cpumask_empty(&sde_kms->irq_cpu_mask)) { - SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__); - return; - } - - for_each_cpu(cpu, &sde_kms->irq_cpu_mask) { - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - SDE_DEBUG("%s: failed to get cpu%d device\n", __func__, - cpu); - continue; - } - - if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu])) - dev_pm_qos_remove_request( - &sde_kms->pm_qos_irq_req[cpu]); - } -} - void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable) { struct 
msm_drm_private *priv = sde_kms->dev->dev_private; @@ -4573,9 +4578,9 @@ void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable) mutex_lock(&priv->phandle.phandle_lock); if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1) - _sde_kms_update_pm_qos_irq_request(sde_kms); + _sde_kms_update_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask); else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0) - _sde_kms_remove_pm_qos_irq_request(sde_kms); + _sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask); mutex_unlock(&priv->phandle.phandle_lock); } @@ -4595,13 +4600,11 @@ static void sde_kms_irq_affinity_notify( mutex_lock(&priv->phandle.phandle_lock); - _sde_kms_remove_pm_qos_irq_request(sde_kms); - // save irq cpu mask - sde_kms->irq_cpu_mask = *mask; + _sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask); // request vote with updated irq cpu mask if (atomic_read(&sde_kms->irq_vote_count)) - _sde_kms_update_pm_qos_irq_request(sde_kms); + _sde_kms_update_pm_qos_irq_request(sde_kms, mask); mutex_unlock(&priv->phandle.phandle_lock); } From ac48ea94f97163cf8ca7ee030dd6f1384a78efb2 Mon Sep 17 00:00:00 2001 From: Ping Li Date: Thu, 16 Feb 2023 11:49:53 -0800 Subject: [PATCH 4/6] drm: msm: skip the color processing programming if crtc is not enabled Add check to avoid programming the color processing HW if sde_crtc is not enabled. 
Change-Id: I7ffd341147f0caebefb647486a139df5c0aeab31 Signed-off-by: Ping Li --- msm/sde/sde_color_processing.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/msm/sde/sde_color_processing.c b/msm/sde/sde_color_processing.c index 04ae2335..e1fb7f5e 100644 --- a/msm/sde/sde_color_processing.c +++ b/msm/sde/sde_color_processing.c @@ -2243,6 +2243,9 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc) } _sde_cp_flush_properties(crtc); + if (!sde_crtc->enabled) + return; + mutex_lock(&sde_crtc->crtc_cp_lock); _sde_clear_ltm_merge_mode(sde_crtc); From 266de47bef76649caa176dfb5aa6dd3aba800259 Mon Sep 17 00:00:00 2001 From: Anjaneya Prasad Musunuri Date: Fri, 24 Feb 2023 12:05:05 +0530 Subject: [PATCH 5/6] disp: msm: sde: use vzalloc for large allocations Large allocations using kvzalloc can lead to timeouts. This updates the allocation calls accordingly to use vzalloc to remove requirements on physically contiguous memory. Change-Id: I437913b3bf2e46bfeeb2c511bdfc153470fcbc24 Signed-off-by: Anjaneya Prasad Musunuri --- msm/sde/sde_hw_reg_dma_v1_color_proc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/msm/sde/sde_hw_reg_dma_v1_color_proc.c index 4ff08da2..8b4ebe0f 100644 --- a/msm/sde/sde_hw_reg_dma_v1_color_proc.c +++ b/msm/sde/sde_hw_reg_dma_v1_color_proc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
*/ @@ -4631,7 +4631,7 @@ void reg_dmav2_setup_dspp_3d_gamutv43(struct sde_hw_dspp *ctx, void *cfg) if (len % transfer_size_bytes) len = len + (transfer_size_bytes - len % transfer_size_bytes); - data = kvzalloc(len, GFP_KERNEL); + data = vzalloc(len); if (!data) return; @@ -4707,7 +4707,7 @@ void reg_dmav2_setup_dspp_3d_gamutv43(struct sde_hw_dspp *ctx, void *cfg) _perform_sbdma_kickoff(ctx, hw_cfg, dma_ops, blk, GAMUT); exit: - kvfree(data); + vfree(data); } void reg_dmav2_setup_vig_gamutv61(struct sde_hw_pipe *ctx, void *cfg) From d6fb8eead2283c8423b45103f278e8d3ca5eb311 Mon Sep 17 00:00:00 2001 From: Andhavarapu Karthik Date: Thu, 16 Feb 2023 14:01:37 +0530 Subject: [PATCH 6/6] disp: msm: sde: update qos cpu mask to avoid defective cores CPU qos_mask populated from devicetree can have defective cpu cores included. This change identifies and replaces the defective cores in the qos mask with the next possible working cpu cores. Change-Id: I0f32205e7f0abf0482d7dbbd288b7d7f3088726a Signed-off-by: Andhavarapu Karthik --- msm/sde/sde_hw_catalog.c | 45 +++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/msm/sde/sde_hw_catalog.c b/msm/sde/sde_hw_catalog.c index f905d29c..f3d0085c 100644 --- a/msm/sde/sde_hw_catalog.c +++ b/msm/sde/sde_hw_catalog.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ @@ -4427,12 +4427,45 @@ static void _sde_perf_parse_dt_cfg_populate(struct sde_mdss_cfg *cfg, DEFAULT_AXI_BUS_WIDTH; } +/** + * _sde_set_possible_cpu_mask - checks defective cores in qos mask and update the + * mask to avoid defective cores and add next possible cores for pm qos vote. 
+ * @qos_mask: qos_mask set from DT + */ +static int _sde_set_possible_cpu_mask(unsigned long qos_mask) +{ + int cpu = 0, defective_cores_count = 0; + struct cpumask *cpu_qos_mask = to_cpumask(&qos_mask); + unsigned long cpu_p_mask = cpu_possible_mask->bits[0]; + unsigned long cpu_defective_qos = qos_mask & (~cpu_p_mask); + + /* Count all the defective cores in cpu_defective_qos */ + defective_cores_count = cpumask_weight(to_cpumask(&cpu_defective_qos)); + + for_each_cpu(cpu, cpu_all_mask) { + if (cpu_possible(cpu) && !cpumask_test_cpu(cpu, cpu_qos_mask) && + defective_cores_count > 0) { + /* Set next possible cpu */ + cpumask_set_cpu(cpu, cpu_qos_mask); + defective_cores_count--; + } else if (cpumask_test_cpu(cpu, cpu_qos_mask) && !cpu_possible(cpu)) + /* Unset the defective core from qos mask */ + cpumask_clear_cpu(cpu, cpu_qos_mask); + } + + qos_mask = cpu_qos_mask->bits[0]; + return qos_mask; +} + + + static int _sde_perf_parse_dt_cfg(struct device_node *np, struct sde_mdss_cfg *cfg, int *prop_count, struct sde_prop_value *prop_value, bool *prop_exists) { int rc, j; const char *str = NULL; + unsigned long qos_mask = 0; /* * The following performance parameters (e.g. core_ib_ff) are @@ -4476,14 +4509,16 @@ static int _sde_perf_parse_dt_cfg(struct device_node *np, set_bit(SDE_FEATURE_CDP, cfg->features); } - cfg->perf.cpu_mask = - prop_exists[PERF_CPU_MASK] ? + qos_mask = prop_exists[PERF_CPU_MASK] ? PROP_VALUE_ACCESS(prop_value, PERF_CPU_MASK, 0) : DEFAULT_CPU_MASK; - cfg->perf.cpu_mask_perf = - prop_exists[CPU_MASK_PERF] ? + cfg->perf.cpu_mask = _sde_set_possible_cpu_mask(qos_mask); + + qos_mask = prop_exists[CPU_MASK_PERF] ? PROP_VALUE_ACCESS(prop_value, CPU_MASK_PERF, 0) : DEFAULT_CPU_MASK; + cfg->perf.cpu_mask_perf = _sde_set_possible_cpu_mask(qos_mask); + cfg->perf.cpu_dma_latency = prop_exists[PERF_CPU_DMA_LATENCY] ? PROP_VALUE_ACCESS(prop_value, PERF_CPU_DMA_LATENCY, 0) :