[ALPS05150811] Revert GKI related patch: 723feab

This reverts commit 723feab600.
ANDROID: GKI: QoS: Enhance framework to support cpu/irq specific
QoS requests

The above patch causes a KASAN slab-out-of-bounds error in
find_next_bit(). Revert it to pass the KASAN check.

MTK-Commit-Id: e9e20fc70387d9b65a71ce7f09a18809492a755c

Change-Id: Ie80623665095791522022ab9ceafd24171a61e98
CR-Id: ALPS05150811
Feature: [Module]Kernel Maintenance
Signed-off-by: SkyLake.Huang <skylake.huang@mediatek.com>
This commit is contained in:
SkyLake.Huang
2021-01-29 00:08:04 +08:00
committed by Alan Hu
parent 82b4182848
commit 9e34066f02
3 changed files with 1 addition and 192 deletions

View File

@@ -43,17 +43,6 @@ registered notifiers are called only if the target value is now different.
Clients of pm_qos need to save the returned handle for future use in other
pm_qos API functions.
The handle is a pm_qos_request object. By default the request object sets the
request type to PM_QOS_REQ_ALL_CORES, in which case, the PM QoS request
applies to all cores. However, the driver can also specify a request type to
be either of
PM_QOS_REQ_ALL_CORES,
PM_QOS_REQ_AFFINE_CORES,
PM_QOS_REQ_AFFINE_IRQ,
Specify the cpumask when type is set to PM_QOS_REQ_AFFINE_CORES and specify
the IRQ number with PM_QOS_REQ_AFFINE_IRQ.
void pm_qos_update_request(handle, new_target_value):
Will update the list element pointed to by the handle with the new target value
and recompute the new aggregated target, calling the notification tree if the
@@ -67,13 +56,6 @@ the request.
int pm_qos_request(param_class):
Returns the aggregated value for a given PM QoS class.
int pm_qos_request_for_cpu(param_class, cpu):
Returns the aggregated value for a given PM QoS class for the specified cpu.
int pm_qos_request_for_cpumask(param_class, cpumask):
Returns the aggregated value for a given PM QoS class for the specified
cpumask.
int pm_qos_request_active(handle):
Returns if the request is still active, i.e. it has not been removed from a
PM QoS class constraints list.

View File

@@ -9,8 +9,6 @@
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
enum {
PM_QOS_RESERVED = 0,
@@ -46,22 +44,7 @@ enum pm_qos_flags_status {
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
enum pm_qos_req_type {
PM_QOS_REQ_ALL_CORES = 0,
PM_QOS_REQ_AFFINE_CORES,
#ifdef CONFIG_SMP
PM_QOS_REQ_AFFINE_IRQ,
#endif
};
struct pm_qos_request {
enum pm_qos_req_type type;
struct cpumask cpus_affine;
#ifdef CONFIG_SMP
uint32_t irq;
/* Internal structure members */
struct irq_affinity_notify irq_notify;
#endif
struct plist_node node;
int pm_qos_class;
struct delayed_work work; /* for pm_qos_update_request_timeout */
@@ -102,7 +85,6 @@ enum pm_qos_type {
struct pm_qos_constraints {
struct plist_head list;
s32 target_value; /* Do not change to 64 bit */
s32 target_per_cpu[NR_CPUS];
s32 default_value;
s32 no_constraint_value;
enum pm_qos_type type;
@@ -149,8 +131,6 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req,
void pm_qos_remove_request(struct pm_qos_request *req);
int pm_qos_request(int pm_qos_class);
int pm_qos_request_for_cpu(int pm_qos_class, int cpu);
int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);

View File

@@ -43,8 +43,6 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/uaccess.h>
#include <linux/export.h>
@@ -69,8 +67,6 @@ static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
@@ -85,8 +81,6 @@ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
.list = PLIST_HEAD_INIT(network_lat_constraints.list),
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
@@ -97,12 +91,11 @@ static struct pm_qos_object network_lat_pm_qos = {
.name = "network_latency",
};
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
.list = PLIST_HEAD_INIT(network_tput_constraints.list),
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
@@ -265,33 +258,6 @@ static const struct file_operations pm_qos_debug_fops = {
.release = single_release,
};
static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
{
struct pm_qos_request *req = NULL;
int cpu;
s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
plist_for_each_entry(req, &c->list, node) {
for_each_cpu(cpu, &req->cpus_affine) {
switch (c->type) {
case PM_QOS_MIN:
if (qos_val[cpu] > req->node.prio)
qos_val[cpu] = req->node.prio;
break;
case PM_QOS_MAX:
if (req->node.prio > qos_val[cpu])
qos_val[cpu] = req->node.prio;
break;
default:
break;
}
}
}
for_each_possible_cpu(cpu)
c->target_per_cpu[cpu] = qos_val[cpu];
}
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
@@ -340,7 +306,6 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
curr_value = pm_qos_get_value(c);
pm_qos_set_value(c, curr_value);
pm_qos_set_value_for_cpus(c);
spin_unlock_irqrestore(&pm_qos_lock, flags);
@@ -434,49 +399,12 @@ int pm_qos_request(int pm_qos_class)
}
EXPORT_SYMBOL_GPL(pm_qos_request);
int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
{
return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
}
EXPORT_SYMBOL(pm_qos_request_for_cpu);
int pm_qos_request_active(struct pm_qos_request *req)
{
return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
{
unsigned long irqflags;
int cpu;
struct pm_qos_constraints *c = NULL;
int val;
spin_lock_irqsave(&pm_qos_lock, irqflags);
c = pm_qos_array[pm_qos_class]->constraints;
val = c->default_value;
for_each_cpu(cpu, mask) {
switch (c->type) {
case PM_QOS_MIN:
if (c->target_per_cpu[cpu] < val)
val = c->target_per_cpu[cpu];
break;
case PM_QOS_MAX:
if (c->target_per_cpu[cpu] > val)
val = c->target_per_cpu[cpu];
break;
default:
break;
}
}
spin_unlock_irqrestore(&pm_qos_lock, irqflags);
return val;
}
EXPORT_SYMBOL(pm_qos_request_for_cpumask);
static void __pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
@@ -503,42 +431,6 @@ static void pm_qos_work_fn(struct work_struct *work)
__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
#ifdef CONFIG_SMP
static void pm_qos_irq_release(struct kref *ref)
{
unsigned long flags;
struct irq_affinity_notify *notify = container_of(ref,
struct irq_affinity_notify, kref);
struct pm_qos_request *req = container_of(notify,
struct pm_qos_request, irq_notify);
struct pm_qos_constraints *c =
pm_qos_array[req->pm_qos_class]->constraints;
spin_lock_irqsave(&pm_qos_lock, flags);
cpumask_setall(&req->cpus_affine);
spin_unlock_irqrestore(&pm_qos_lock, flags);
pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ,
c->default_value);
}
static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
unsigned long flags;
struct pm_qos_request *req = container_of(notify,
struct pm_qos_request, irq_notify);
struct pm_qos_constraints *c =
pm_qos_array[req->pm_qos_class]->constraints;
spin_lock_irqsave(&pm_qos_lock, flags);
cpumask_copy(&req->cpus_affine, mask);
spin_unlock_irqrestore(&pm_qos_lock, flags);
pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio);
}
#endif
/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
@@ -562,51 +454,6 @@ void pm_qos_add_request(struct pm_qos_request *req,
WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
return;
}
switch (req->type) {
case PM_QOS_REQ_AFFINE_CORES:
if (cpumask_empty(&req->cpus_affine)) {
req->type = PM_QOS_REQ_ALL_CORES;
cpumask_setall(&req->cpus_affine);
WARN(1, "Affine cores not set for request with affinity flag\n");
}
break;
#ifdef CONFIG_SMP
case PM_QOS_REQ_AFFINE_IRQ:
if (irq_can_set_affinity(req->irq)) {
int ret = 0;
struct irq_desc *desc = irq_to_desc(req->irq);
struct cpumask *mask = desc->irq_data.common->affinity;
/* Get the current affinity */
cpumask_copy(&req->cpus_affine, mask);
req->irq_notify.irq = req->irq;
req->irq_notify.notify = pm_qos_irq_notify;
req->irq_notify.release = pm_qos_irq_release;
ret = irq_set_affinity_notifier(req->irq,
&req->irq_notify);
if (ret) {
WARN(1, "IRQ affinity notify set failed\n");
req->type = PM_QOS_REQ_ALL_CORES;
cpumask_setall(&req->cpus_affine);
}
} else {
req->type = PM_QOS_REQ_ALL_CORES;
cpumask_setall(&req->cpus_affine);
WARN(1, "IRQ-%d not set for request with affinity flag\n",
req->irq);
}
break;
#endif
default:
WARN(1, "Unknown request type %d\n", req->type);
/* fall through */
case PM_QOS_REQ_ALL_CORES:
cpumask_setall(&req->cpus_affine);
break;
}
req->pm_qos_class = pm_qos_class;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
trace_pm_qos_add_request(pm_qos_class, value);