Merge 132458ad67 on remote branch

Change-Id: I7c4dc30285481b89c79a66eb27ad616469687a8b
This commit is contained in:
Linux Build Service Account
2023-03-16 02:00:50 -07:00
7 changed files with 121 additions and 76 deletions

View File

@@ -1655,10 +1655,6 @@ static void dp_display_disconnect_sync(struct dp_display_private *dp)
cancel_work_sync(&dp->attention_work);
flush_workqueue(dp->wq);
if (!dp->debug->sim_mode && !dp->no_aux_switch
&& !dp->parser->gpio_aux_switch)
dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE);
/*
* Delay the teardown of the mainlink for better interop experience.
* It is possible that certain sinks can issue an HPD high immediately
@@ -1709,6 +1705,13 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
if (dp->debug->psm_enabled && dp_display_state_is(DP_STATE_READY))
dp->link->psm_config(dp->link, &dp->panel->link_info, true);
dp->ctrl->abort(dp->ctrl, true);
dp->aux->abort(dp->aux, true);
if (!dp->debug->sim_mode && !dp->no_aux_switch
&& !dp->parser->gpio_aux_switch)
dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE);
dp_display_disconnect_sync(dp);
mutex_lock(&dp->session_lock);

View File

@@ -2243,6 +2243,9 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
}
_sde_cp_flush_properties(crtc);
if (!sde_crtc->enabled)
return;
mutex_lock(&sde_crtc->crtc_cp_lock);
_sde_clear_ltm_merge_mode(sde_crtc);

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@@ -3475,7 +3475,8 @@ int sde_connector_register_custom_event(struct sde_kms *kms,
break;
case DRM_EVENT_SDE_HW_RECOVERY:
ret = _sde_conn_enable_hw_recovery(conn_drm);
sde_dbg_update_dump_mode(val);
if (SDE_DBG_DEFAULT_DUMP_MODE != SDE_DBG_DUMP_IN_LOG_LIMITED)
sde_dbg_update_dump_mode(val);
break;
default:
break;

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@@ -4427,12 +4427,45 @@ static void _sde_perf_parse_dt_cfg_populate(struct sde_mdss_cfg *cfg,
DEFAULT_AXI_BUS_WIDTH;
}
/**
* _sde_set_possible_cpu_mask - sanitize a DT-provided PM QoS cpu mask by
* removing cores that are not possible on this SoC ("defective" cores) and
* substituting an equal number of possible cores that were not already in
* the mask, so the vote still covers the intended number of CPUs.
* @qos_mask: raw cpu bitmask read from DT (bits may reference fused-off cores)
*
* Return: the sanitized cpu bitmask (low word of the resulting cpumask).
*/
static int _sde_set_possible_cpu_mask(unsigned long qos_mask)
{
int cpu = 0, defective_cores_count = 0;
/* NOTE(review): to_cpumask() on a single stack word assumes
 * NR_CPUS <= BITS_PER_LONG — true for these targets, verify if reused. */
struct cpumask *cpu_qos_mask = to_cpumask(&qos_mask);
unsigned long cpu_p_mask = cpu_possible_mask->bits[0];
/* Bits set in DT mask but absent from cpu_possible_mask. */
unsigned long cpu_defective_qos = qos_mask & (~cpu_p_mask);
/* Count all the defective cores in cpu_defective_qos */
defective_cores_count = cpumask_weight(to_cpumask(&cpu_defective_qos));
for_each_cpu(cpu, cpu_all_mask) {
if (cpu_possible(cpu) && !cpumask_test_cpu(cpu, cpu_qos_mask) &&
defective_cores_count > 0) {
/* Set next possible cpu (one replacement per defective core) */
cpumask_set_cpu(cpu, cpu_qos_mask);
defective_cores_count--;
} else if (cpumask_test_cpu(cpu, cpu_qos_mask) && !cpu_possible(cpu))
/* Unset the defective core from qos mask */
cpumask_clear_cpu(cpu, cpu_qos_mask);
}
/* cpu_qos_mask aliases qos_mask's storage; read back the final bits. */
qos_mask = cpu_qos_mask->bits[0];
return qos_mask;
}
static int _sde_perf_parse_dt_cfg(struct device_node *np,
struct sde_mdss_cfg *cfg, int *prop_count,
struct sde_prop_value *prop_value, bool *prop_exists)
{
int rc, j;
const char *str = NULL;
unsigned long qos_mask = 0;
/*
* The following performance parameters (e.g. core_ib_ff) are
@@ -4476,14 +4509,16 @@ static int _sde_perf_parse_dt_cfg(struct device_node *np,
set_bit(SDE_FEATURE_CDP, cfg->features);
}
cfg->perf.cpu_mask =
prop_exists[PERF_CPU_MASK] ?
qos_mask = prop_exists[PERF_CPU_MASK] ?
PROP_VALUE_ACCESS(prop_value, PERF_CPU_MASK, 0) :
DEFAULT_CPU_MASK;
cfg->perf.cpu_mask_perf =
prop_exists[CPU_MASK_PERF] ?
cfg->perf.cpu_mask = _sde_set_possible_cpu_mask(qos_mask);
qos_mask = prop_exists[CPU_MASK_PERF] ?
PROP_VALUE_ACCESS(prop_value, CPU_MASK_PERF, 0) :
DEFAULT_CPU_MASK;
cfg->perf.cpu_mask_perf = _sde_set_possible_cpu_mask(qos_mask);
cfg->perf.cpu_dma_latency =
prop_exists[PERF_CPU_DMA_LATENCY] ?
PROP_VALUE_ACCESS(prop_value, PERF_CPU_DMA_LATENCY, 0) :

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
*/
@@ -4631,7 +4631,7 @@ void reg_dmav2_setup_dspp_3d_gamutv43(struct sde_hw_dspp *ctx, void *cfg)
if (len % transfer_size_bytes)
len = len + (transfer_size_bytes - len % transfer_size_bytes);
data = kvzalloc(len, GFP_KERNEL);
data = vzalloc(len);
if (!data)
return;
@@ -4707,7 +4707,7 @@ void reg_dmav2_setup_dspp_3d_gamutv43(struct sde_hw_dspp *ctx, void *cfg)
_perform_sbdma_kickoff(ctx, hw_cfg, dma_ops, blk, GAMUT);
exit:
kvfree(data);
vfree(data);
}
void reg_dmav2_setup_vig_gamutv61(struct sde_hw_pipe *ctx, void *cfg)

View File

@@ -1079,6 +1079,62 @@ static struct drm_crtc *sde_kms_vm_get_vm_crtc(
return vm_crtc;
}
/*
 * _sde_kms_update_pm_qos_irq_request - place (or refresh) a per-cpu
 * DEV_PM_QOS_RESUME_LATENCY request on every cpu in @mask, using the
 * latency value from the SDE catalog, so display IRQs are serviced with
 * low wakeup latency. Also records @mask in sde_kms->irq_cpu_mask so a
 * later remove can target the same cpus.
 */
static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
{
struct device *cpu_dev;
int cpu = 0;
u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;
// save irq cpu mask
sde_kms->irq_cpu_mask = *mask;
if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
return;
}
for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
/* cpu has no device node; skip it rather than fail the whole vote */
SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
cpu);
continue;
}
/* Update in place if a request already exists, otherwise add one. */
if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
cpu_irq_latency);
else
dev_pm_qos_add_request(cpu_dev,
&sde_kms->pm_qos_irq_req[cpu],
DEV_PM_QOS_RESUME_LATENCY,
cpu_irq_latency);
}
}
/*
 * _sde_kms_remove_pm_qos_irq_request - drop the per-cpu resume-latency
 * PM QoS requests previously added by _sde_kms_update_pm_qos_irq_request
 * for every cpu in @mask. Requests that are not active are left alone,
 * so removal is safe to call even if add was skipped for some cpus.
 */
static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
{
struct device *cpu_dev;
int cpu = 0;
if (cpumask_empty(mask)) {
SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
return;
}
for_each_cpu(cpu, mask) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
cpu);
continue;
}
/* Only remove requests that were actually added/updated earlier. */
if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
dev_pm_qos_remove_request(
&sde_kms->pm_qos_irq_req[cpu]);
}
}
int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
struct drm_atomic_state *state)
{
@@ -1116,6 +1172,8 @@ int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
_sde_kms_remove_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);
/* enable the display path IRQ's */
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask) {
@@ -1404,6 +1462,7 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
if (is_primary) {
_sde_kms_update_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);
/* disable vblank events */
drm_crtc_vblank_off(crtc);
@@ -4512,60 +4571,6 @@ static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
return 0;
}
/*
 * Legacy variant (removed by this commit): votes a resume-latency PM QoS
 * request on each cpu in the previously-saved sde_kms->irq_cpu_mask; the
 * replacement takes the mask as an explicit parameter instead.
 */
static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
{
struct device *cpu_dev;
int cpu = 0;
u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;
if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
return;
}
for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
cpu);
continue;
}
/* Update the existing request if active, otherwise add a new one. */
if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
cpu_irq_latency);
else
dev_pm_qos_add_request(cpu_dev,
&sde_kms->pm_qos_irq_req[cpu],
DEV_PM_QOS_RESUME_LATENCY,
cpu_irq_latency);
}
}
/*
 * Legacy variant (removed by this commit): removes the per-cpu PM QoS
 * requests for the cpus recorded in sde_kms->irq_cpu_mask; the
 * replacement takes the mask as an explicit parameter instead.
 */
static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
{
struct device *cpu_dev;
int cpu = 0;
if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
return;
}
for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
cpu);
continue;
}
/* Only remove requests that are currently active. */
if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
dev_pm_qos_remove_request(
&sde_kms->pm_qos_irq_req[cpu]);
}
}
void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
{
struct msm_drm_private *priv = sde_kms->dev->dev_private;
@@ -4573,9 +4578,9 @@ void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
mutex_lock(&priv->phandle.phandle_lock);
if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
_sde_kms_update_pm_qos_irq_request(sde_kms);
_sde_kms_update_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
_sde_kms_remove_pm_qos_irq_request(sde_kms);
_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
mutex_unlock(&priv->phandle.phandle_lock);
}
@@ -4595,13 +4600,11 @@ static void sde_kms_irq_affinity_notify(
mutex_lock(&priv->phandle.phandle_lock);
_sde_kms_remove_pm_qos_irq_request(sde_kms);
// save irq cpu mask
sde_kms->irq_cpu_mask = *mask;
_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
// request vote with updated irq cpu mask
if (atomic_read(&sde_kms->irq_vote_count))
_sde_kms_update_pm_qos_irq_request(sde_kms);
_sde_kms_update_pm_qos_irq_request(sde_kms, mask);
mutex_unlock(&priv->phandle.phandle_lock);
}

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@@ -543,7 +543,7 @@ void sde_rsc_debug_dump(u32 mux_sel);
/**
* sde_dbg_update_dump_mode - update dump mode to in_coredump mode if devcoredump
* fueature is enabled. Default dump mode is in_mem, if HW recovery feature is
* feature is enabled. Default dump mode is in_mem, if HW recovery feature is
* enabled, this function will be called to set dump mode to in_coredump option.
* @enable_coredump: if enable_coredump is true, update dump mode to in_coredump,
* otherwise reset the dump mode to default mode.