scsi: ufs: Add simple IRQ-affined PM QoS operations
Qualcomm's PM QoS solution suffers from a number of issues: applying PM QoS to all CPUs, convoluted spaghetti code that wastes CPU cycles, and keeping PM QoS applied for 10 ms after all requests finish processing. This implements a simple IRQ-affined PM QoS mechanism for each UFS adapter which uses atomics to elide locking, and enqueues a worker to apply PM QoS to the target CPU as soon as a command request is issued. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> Signed-off-by: alk3pInjection <webmaster@raspii.tech> Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
This commit is contained in:
committed by
Samuel Pascua
parent
577e047f9f
commit
3340d216db
@@ -4217,6 +4217,48 @@ static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
|
||||
up_read(&hba->lock);
|
||||
}
|
||||
|
||||
/*
 * Workqueue callback that applies the adapter's PM QoS request on behalf of
 * ufshcd_pm_qos_get().  Runs on system_unbound_wq; the request is IRQ-affine
 * (PM_QOS_REQ_AFFINE_IRQ, set up in ufshcd_init), so it targets the CPU
 * servicing the UFS interrupt.
 */
static void ufshcd_pm_qos_get_worker(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.get_work);

	/*
	 * Lock-free fast path: if every outstanding command already
	 * completed before this worker ran, there is nothing to do.
	 */
	if (!atomic_read(&hba->pm_qos.count))
		return;

	mutex_lock(&hba->pm_qos.lock);
	/*
	 * Re-check the count under the mutex so we cannot race with
	 * ufshcd_pm_qos_put_worker(); `active` tracks whether the QoS
	 * request is currently applied, avoiding redundant updates.
	 * 100 is the CPU DMA latency target handed to the PM QoS core
	 * (usec by PM_QOS_CPU_DMA_LATENCY convention — see ufshcd_init).
	 */
	if (atomic_read(&hba->pm_qos.count) && !hba->pm_qos.active) {
		pm_qos_update_request(&hba->pm_qos.req, 100);
		hba->pm_qos.active = true;
	}
	mutex_unlock(&hba->pm_qos.lock);
}
|
||||
|
||||
/*
 * Workqueue callback that drops the adapter's PM QoS request on behalf of
 * ufshcd_pm_qos_put(), once the last in-flight command has completed.
 * Mirror image of ufshcd_pm_qos_get_worker().
 */
static void ufshcd_pm_qos_put_worker(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.put_work);

	/*
	 * Lock-free fast path: a new command was issued after the count
	 * hit zero, so the QoS request must stay applied.
	 */
	if (atomic_read(&hba->pm_qos.count))
		return;

	mutex_lock(&hba->pm_qos.lock);
	/*
	 * Re-check under the mutex to serialize against the get worker;
	 * only restore the default (no constraint) value if the request
	 * is actually applied and no command raced in meanwhile.
	 */
	if (!atomic_read(&hba->pm_qos.count) && hba->pm_qos.active) {
		pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE);
		hba->pm_qos.active = false;
	}
	mutex_unlock(&hba->pm_qos.lock);
}
|
||||
|
||||
static void ufshcd_pm_qos_get(struct ufs_hba *hba)
|
||||
{
|
||||
if (atomic_inc_return(&hba->pm_qos.count) == 1)
|
||||
queue_work(system_unbound_wq, &hba->pm_qos.get_work);
|
||||
}
|
||||
|
||||
static void ufshcd_pm_qos_put(struct ufs_hba *hba)
|
||||
{
|
||||
if (atomic_dec_return(&hba->pm_qos.count) == 0)
|
||||
queue_work(system_unbound_wq, &hba->pm_qos.put_work);
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_queuecommand - main entry point for SCSI requests
|
||||
* @cmd: command from SCSI Midlayer
|
||||
@@ -4232,12 +4274,16 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
int tag;
|
||||
int err = 0;
|
||||
bool has_read_lock = false;
|
||||
bool cmd_sent = false;
|
||||
|
||||
hba = shost_priv(host);
|
||||
|
||||
if (!cmd || !cmd->request || !hba)
|
||||
return -EINVAL;
|
||||
|
||||
/* Wake the CPU managing the IRQ as soon as possible */
|
||||
ufshcd_pm_qos_get(hba);
|
||||
|
||||
tag = cmd->request->tag;
|
||||
if (!ufshcd_valid_tag(hba, tag)) {
|
||||
dev_err(hba->dev,
|
||||
@@ -4249,10 +4295,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
err = ufshcd_get_read_lock(hba, cmd->device->lun);
|
||||
if (unlikely(err < 0)) {
|
||||
if (err == -EPERM) {
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
err = SCSI_MLQUEUE_HOST_BUSY;
|
||||
goto out_pm_qos;
|
||||
}
|
||||
if (err == -EAGAIN) {
|
||||
err = SCSI_MLQUEUE_HOST_BUSY;
|
||||
goto out_pm_qos;
|
||||
}
|
||||
if (err == -EAGAIN)
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
} else if (err == 1) {
|
||||
has_read_lock = true;
|
||||
}
|
||||
@@ -4410,16 +4459,22 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
if (has_read_lock)
|
||||
ufshcd_put_read_lock(hba);
|
||||
cmd->scsi_done(cmd);
|
||||
return 0;
|
||||
err = 0;
|
||||
goto out_pm_qos;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd_sent = true;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
out:
|
||||
if (has_read_lock)
|
||||
ufshcd_put_read_lock(hba);
|
||||
out_pm_qos:
|
||||
if (!cmd_sent)
|
||||
ufshcd_pm_qos_put(hba);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -7481,6 +7536,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
|
||||
|
||||
req = cmd->request;
|
||||
if (req) {
|
||||
ufshcd_pm_qos_put(hba);
|
||||
/* Update IO svc time latency histogram */
|
||||
if (req->lat_hist_enabled) {
|
||||
ktime_t completion;
|
||||
@@ -7551,13 +7607,8 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
|
||||
/* Mark completed command as NULL in LRB */
|
||||
lrbp->cmd = NULL;
|
||||
ufshcd_release_all(hba);
|
||||
if (cmd->request) {
|
||||
/*
|
||||
* As we are accessing the "request" structure,
|
||||
* this must be called before calling
|
||||
* ->scsi_done() callback.
|
||||
*/
|
||||
}
|
||||
if (cmd->request)
|
||||
ufshcd_pm_qos_put(hba);
|
||||
/* Do not touch lrbp after scsi done */
|
||||
cmd->scsi_done(cmd);
|
||||
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
|
||||
@@ -12691,6 +12742,9 @@ void ufshcd_remove(struct ufs_hba *hba)
|
||||
/* disable interrupts */
|
||||
ufshcd_disable_intr(hba, hba->intr_mask);
|
||||
ufshcd_hba_stop(hba, true);
|
||||
cancel_work_sync(&hba->pm_qos.put_work);
|
||||
cancel_work_sync(&hba->pm_qos.get_work);
|
||||
pm_qos_remove_request(&hba->pm_qos.req);
|
||||
|
||||
ufshcd_exit_clk_gating(hba);
|
||||
ufshcd_exit_hibern8_on_idle(hba);
|
||||
@@ -12969,6 +13023,14 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
|
||||
*/
|
||||
ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
|
||||
|
||||
mutex_init(&hba->pm_qos.lock);
|
||||
INIT_WORK(&hba->pm_qos.get_work, ufshcd_pm_qos_get_worker);
|
||||
INIT_WORK(&hba->pm_qos.put_work, ufshcd_pm_qos_put_worker);
|
||||
hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
|
||||
hba->pm_qos.req.irq = irq;
|
||||
pm_qos_add_request(&hba->pm_qos.req, PM_QOS_CPU_DMA_LATENCY,
|
||||
PM_QOS_DEFAULT_VALUE);
|
||||
|
||||
/* IRQ registration */
|
||||
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
|
||||
dev_name(dev), hba);
|
||||
@@ -13075,6 +13137,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
|
||||
out_remove_scsi_host:
|
||||
scsi_remove_host(hba->host);
|
||||
exit_gating:
|
||||
pm_qos_remove_request(&hba->pm_qos.req);
|
||||
ufshcd_exit_clk_gating(hba);
|
||||
ufshcd_exit_latency_hist(hba);
|
||||
out_disable:
|
||||
|
||||
@@ -58,6 +58,7 @@
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/extcon.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include "unipro.h"
|
||||
|
||||
#include <asm/irq.h>
|
||||
@@ -1214,6 +1215,15 @@ struct ufs_hba {
|
||||
void *crypto_DO_NOT_USE[8];
|
||||
#endif /* CONFIG_SCSI_UFS_CRYPTO */
|
||||
|
||||
struct {
|
||||
struct pm_qos_request req;
|
||||
struct work_struct get_work;
|
||||
struct work_struct put_work;
|
||||
struct mutex lock;
|
||||
atomic_t count;
|
||||
bool active;
|
||||
} pm_qos;
|
||||
|
||||
#if IS_ENABLED(CONFIG_BLK_TURBO_WRITE)
|
||||
bool support_tw;
|
||||
bool tw_state_not_allowed;
|
||||
|
||||
Reference in New Issue
Block a user