techpack: disp: msm: sde: Remove unneeded PM QoS requests
These requests are blocking some CPUs in the LITTLE cluster from entering deep idle, because the driver assumes that display rendering work occurs on a hardcoded set of CPUs, which is false. The scope of these requests is also quite large, which increases power consumption.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Change-Id: I6d5d88f769a25952ad62cad2ee52670c51271292
This commit is contained in:
committed by
Wiktor Rudzki
parent
60362f61bc
commit
b8cbc800d2
@@ -150,69 +150,6 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
|
||||
}
|
||||
}
|
||||
|
||||
static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc)
|
||||
{
|
||||
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
struct msm_drm_private *priv;
|
||||
struct sde_kms *sde_kms;
|
||||
struct device *cpu_dev;
|
||||
struct cpumask *cpu_mask = NULL;
|
||||
int cpu = 0;
|
||||
u32 cpu_dma_latency;
|
||||
|
||||
priv = drm_enc->dev->dev_private;
|
||||
sde_kms = to_sde_kms(priv->kms);
|
||||
|
||||
if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
|
||||
return;
|
||||
|
||||
cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
|
||||
cpumask_clear(&sde_enc->valid_cpu_mask);
|
||||
|
||||
if (sde_enc->mode_info.frame_rate > DEFAULT_FPS)
|
||||
cpu_mask = to_cpumask(&sde_kms->catalog->perf.cpu_mask_perf);
|
||||
if (!cpu_mask &&
|
||||
sde_encoder_check_curr_mode(drm_enc,
|
||||
MSM_DISPLAY_CMD_MODE))
|
||||
cpu_mask = to_cpumask(&sde_kms->catalog->perf.cpu_mask);
|
||||
|
||||
if (!cpu_mask)
|
||||
return;
|
||||
|
||||
for_each_cpu(cpu, cpu_mask) {
|
||||
cpu_dev = get_cpu_device(cpu);
|
||||
if (!cpu_dev) {
|
||||
SDE_ERROR("%s: failed to get cpu%d device\n", __func__,
|
||||
cpu);
|
||||
return;
|
||||
}
|
||||
cpumask_set_cpu(cpu, &sde_enc->valid_cpu_mask);
|
||||
dev_pm_qos_add_request(cpu_dev,
|
||||
&sde_enc->pm_qos_cpu_req[cpu],
|
||||
DEV_PM_QOS_RESUME_LATENCY, cpu_dma_latency);
|
||||
SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_dma_latency, cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc)
|
||||
{
|
||||
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
struct device *cpu_dev;
|
||||
int cpu = 0;
|
||||
|
||||
for_each_cpu(cpu, &sde_enc->valid_cpu_mask) {
|
||||
cpu_dev = get_cpu_device(cpu);
|
||||
if (!cpu_dev) {
|
||||
SDE_ERROR("%s: failed to get cpu%d device\n", __func__,
|
||||
cpu);
|
||||
continue;
|
||||
}
|
||||
dev_pm_qos_remove_request(&sde_enc->pm_qos_cpu_req[cpu]);
|
||||
SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu);
|
||||
}
|
||||
cpumask_clear(&sde_enc->valid_cpu_mask);
|
||||
}
|
||||
|
||||
static bool _sde_encoder_is_autorefresh_enabled(
|
||||
struct sde_encoder_virt *sde_enc)
|
||||
{
|
||||
@@ -1543,12 +1480,7 @@ static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
|
||||
|
||||
/* enable all the irq */
|
||||
sde_encoder_irq_control(drm_enc, true);
|
||||
|
||||
_sde_encoder_pm_qos_add_request(drm_enc);
|
||||
|
||||
} else {
|
||||
_sde_encoder_pm_qos_remove_request(drm_enc);
|
||||
|
||||
/* disable all the irq */
|
||||
sde_encoder_irq_control(drm_enc, false);
|
||||
|
||||
@@ -1732,7 +1664,6 @@ static int _sde_encoder_rc_kickoff(struct drm_encoder *drm_enc,
|
||||
|
||||
if (is_vid_mode && sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
|
||||
sde_encoder_irq_control(drm_enc, true);
|
||||
_sde_encoder_pm_qos_add_request(drm_enc);
|
||||
} else {
|
||||
/* enable all the clks and resources */
|
||||
ret = _sde_encoder_resource_control_helper(drm_enc,
|
||||
@@ -1890,7 +1821,6 @@ skip_wait:
|
||||
SDE_ENC_RC_STATE_MODESET, SDE_EVTLOG_FUNC_CASE5);
|
||||
|
||||
sde_enc->rc_state = SDE_ENC_RC_STATE_MODESET;
|
||||
_sde_encoder_pm_qos_remove_request(drm_enc);
|
||||
|
||||
end:
|
||||
mutex_unlock(&sde_enc->rc_lock);
|
||||
@@ -1926,7 +1856,6 @@ static int _sde_encoder_rc_post_modeset(struct drm_encoder *drm_enc,
|
||||
SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE6);
|
||||
|
||||
sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
|
||||
_sde_encoder_pm_qos_add_request(drm_enc);
|
||||
|
||||
end:
|
||||
mutex_unlock(&sde_enc->rc_lock);
|
||||
@@ -1965,7 +1894,6 @@ static int _sde_encoder_rc_idle(struct drm_encoder *drm_enc,
|
||||
|
||||
if (is_vid_mode) {
|
||||
sde_encoder_irq_control(drm_enc, false);
|
||||
_sde_encoder_pm_qos_remove_request(drm_enc);
|
||||
} else {
|
||||
/* disable all the clks and resources */
|
||||
_sde_encoder_update_rsc_client(drm_enc, false);
|
||||
|
||||
@@ -186,7 +186,6 @@ struct sde_encoder_ops {
|
||||
* @recovery_events_enabled: status of hw recovery feature enable by client
|
||||
* @elevated_ahb_vote: increase AHB bus speed for the first frame
|
||||
* after power collapse
|
||||
* @pm_qos_cpu_req: qos request for all cpu core frequency
|
||||
* @valid_cpu_mask: actual voted cpu core mask
|
||||
* @mode_info: stores the current mode and should be used
|
||||
* only in commit phase
|
||||
@@ -258,7 +257,6 @@ struct sde_encoder_virt {
|
||||
|
||||
bool recovery_events_enabled;
|
||||
bool elevated_ahb_vote;
|
||||
struct dev_pm_qos_request pm_qos_cpu_req[NR_CPUS];
|
||||
struct cpumask valid_cpu_mask;
|
||||
struct msm_mode_info mode_info;
|
||||
bool delay_kickoff;
|
||||
|
||||
Reference in New Issue
Block a user