block: Import oplus changes

Change-Id: I2b926fa0a05d2844959c333defb6e1e72c02fd00
Signed-off-by: Pranaya Deomani <pranayadeomani@protonmail.com>
This commit is contained in:
Pranaya Deomani
2021-12-25 17:54:26 +05:30
parent 1b78b17ab1
commit f46cc94c64
15 changed files with 671 additions and 21 deletions

View File

@@ -230,3 +230,8 @@ config BLK_MQ_RDMA
default y
source block/Kconfig.iosched
#ifdef OPLUS_FEATURE_FG_IO_OPT
#/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
source block/oppo_foreground_io_opt/Kconfig
#endif

View File

@@ -38,4 +38,8 @@ obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \
blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
#ifdef OPLUS_FEATURE_FG_IO_OPT
#/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
obj-$(CONFIG_OPPO_FG_IO_OPT) += oppo_foreground_io_opt/
#endif /*OPLUS_FEATURE_FG_IO_OPT*/

153
block/blk-core.c Normal file → Executable file
View File

@@ -46,7 +46,17 @@
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "oppo_foreground_io_opt/oppo_foreground_io_opt.h"
#endif
#ifdef CONFIG_DEBUG_FS
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
#include <linux/iomonitor/iomonitor.h>
#endif /*OPLUS_FEATURE_IOMONITOR*/
struct dentry *blk_debugfs_root;
#endif
@@ -118,6 +128,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
INIT_LIST_HEAD(&rq->fg_list);
#endif
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
@@ -836,7 +850,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
INIT_LIST_HEAD(&q->fg_head);
#endif
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
@@ -858,7 +875,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
fg_bg_max_count_init(q);
#endif
setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
@@ -1302,7 +1322,9 @@ out:
*/
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_init_reqstats(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_getrq(q, bio, op);
return rq;
@@ -1794,7 +1816,11 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
if (bio->bi_opf & REQ_FG)
req->cmd_flags |= REQ_FG;
#endif
req->__sector = bio->bi_iter.bi_sector;
if (ioprio_valid(bio_prio(bio)))
req->ioprio = bio_prio(bio);
@@ -2003,6 +2029,7 @@ static inline int blk_partition_remap(struct bio *bio)
trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
bio->bi_iter.bi_sector - p->start_sect);
} else {
printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
ret = -EIO;
}
rcu_read_unlock();
@@ -2291,11 +2318,17 @@ blk_qc_t submit_bio(struct bio *bio)
if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGOUT, count);
#endif /*OPLUS_FEATURE_IOMONITOR*/
} else {
if (bio_flagged(bio, BIO_WORKINGSET))
workingset_read = true;
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGIN, count);
#endif /*OPLUS_FEATURE_IOMONITOR*/
}
#ifdef CONFIG_MTK_BLOCK_TAG
@@ -2310,7 +2343,11 @@ blk_qc_t submit_bio(struct bio *bio)
bio_devname(bio, b), count);
}
}
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
if (high_prio_for_task(current))
bio->bi_opf |= REQ_FG;
#endif
/*
* If we're reading data that is part of the userspace
* workingset, count submission time as memory stall. When the
@@ -2603,6 +2640,10 @@ struct request *blk_peek_request(struct request_queue *q)
* not be passed by new incoming requests
*/
rq->rq_flags |= RQF_STARTED;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
rq->req_td = ktime_get();
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_rq_issue(q, rq);
}
@@ -2662,7 +2703,9 @@ struct request *blk_peek_request(struct request_queue *q)
break;
}
}
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_io_history(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
return rq;
}
EXPORT_SYMBOL(blk_peek_request);
@@ -2675,7 +2718,10 @@ static void blk_dequeue_request(struct request *rq)
BUG_ON(ELV_ON_HASH(rq));
list_del_init(&rq->queuelist);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
list_del_init(&rq->fg_list);
#endif
/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
@@ -2685,6 +2731,13 @@ static void blk_dequeue_request(struct request *rq)
q->in_flight[rq_is_sync(rq)]++;
set_io_start_time_ns(rq);
}
#ifdef OPLUS_FEATURE_HEALTHINFO
// jiheng.xie@PSW.Tech.BSP.Performance, 2019/03/11
// Add for ioqueue
#ifdef CONFIG_OPPO_HEALTHINFO
ohm_ioqueue_add_inflight(q, rq);
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
}
/**
@@ -2767,6 +2820,9 @@ bool blk_update_request(struct request *req, blk_status_t error,
int total_bytes;
trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_reqstats(req, nr_bytes);
#endif /*OPLUS_FEATURE_IOMONITOR*/
if (!req->bio)
return false;
@@ -3681,3 +3737,86 @@ int __init blk_dev_init(void)
return 0;
}
#ifdef VENDOR_EDIT
/*
* Blk IO latency support. We want this to be as cheap as possible, so doing
* this lockless (and avoiding atomics), a few off by a few errors in this
* code is not harmful, and we don't want to do anything that is
* perf-impactful.
* TODO : If necessary, we can make the histograms per-cpu and aggregate
* them when printing them out.
*/
/*
 * Reset every read/write latency histogram bucket and both element
 * counters. Deliberately lockless — per the section comment above, a
 * few lost updates are acceptable for this diagnostics-only histogram.
 */
void
blk_zero_latency_hist(struct io_latency_state *s)
{
	memset(s->latency_y_axis_read, 0, sizeof(s->latency_y_axis_read));
	memset(s->latency_y_axis_write, 0, sizeof(s->latency_y_axis_write));
	s->latency_reads_elems = 0;
	s->latency_writes_elems = 0;
}
EXPORT_SYMBOL(blk_zero_latency_hist);
/*
 * Append one direction's latency histogram ("Read" or "Write") to @buf.
 *
 * @buf:           output page (PAGE_SIZE bytes total)
 * @bytes_written: bytes already emitted into @buf
 * @dir:           direction label interpolated into the header line
 * @y_axis:        bucket counts; entry [ARRAY_SIZE(latency_x_axis_us)]
 *                 is the overflow bucket
 * @num_elem:      total samples for this direction; nothing is printed
 *                 when it is zero
 *
 * Returns the updated byte count. Output format is identical to the
 * previous hand-duplicated read/write copies of this loop.
 */
static int
blk_latency_hist_print_one(char *buf, int bytes_written, const char *dir,
			   const u_int64_t *y_axis, u_int64_t num_elem)
{
	int i;
	u_int64_t elem;
	int pct;

	if (num_elem == 0)
		return bytes_written;
	bytes_written += scnprintf(buf + bytes_written,
			PAGE_SIZE - bytes_written,
			"IO svc_time %s Latency Histogram (n = %llu):\n",
			dir, num_elem);
	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
		elem = y_axis[i];
		pct = div64_u64(elem * 100, num_elem);
		bytes_written += scnprintf(buf + bytes_written,
				PAGE_SIZE - bytes_written,
				"\t< %5lluus%15llu%15d%%\n",
				latency_x_axis_us[i], elem, pct);
	}
	/* Last element in y-axis table is overflow */
	elem = y_axis[i];
	pct = div64_u64(elem * 100, num_elem);
	/*
	 * NOTE(review): the "10" here hardcodes the overflow label as
	 * "> 10ms" independently of latency_x_axis_us — kept as-is from
	 * the original; confirm against the table's last entry.
	 */
	bytes_written += scnprintf(buf + bytes_written,
			PAGE_SIZE - bytes_written,
			"\t> %5dms%15llu%15d%%\n", 10,
			elem, pct);
	return bytes_written;
}

/*
 * Render both the read and the write latency histograms into @buf and
 * return the number of bytes written. The read/write bodies were
 * previously two near-identical copy-pasted loops; they now share
 * blk_latency_hist_print_one().
 */
ssize_t
blk_latency_hist_show(struct io_latency_state *s, char *buf)
{
	int bytes_written = 0;

	bytes_written = blk_latency_hist_print_one(buf, bytes_written,
			"Read", s->latency_y_axis_read,
			s->latency_reads_elems);
	bytes_written = blk_latency_hist_print_one(buf, bytes_written,
			"Write", s->latency_y_axis_write,
			s->latency_writes_elems);
	return bytes_written;
}
EXPORT_SYMBOL(blk_latency_hist_show);
#endif

View File

@@ -76,6 +76,11 @@
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "oppo_foreground_io_opt/oppo_foreground_io_opt.h"
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
/* PREFLUSH/FUA sequences */
enum {
REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
@@ -93,6 +98,11 @@ enum {
FLUSH_PENDING_TIMEOUT = 5 * HZ,
};
#ifdef VENDOR_EDIT
/*jason.tang@TECH.BSP.Kernel.Storage, 2019-05-20, add to count flush*/
extern unsigned long sysctl_blkdev_issue_flush_count;
#endif
static bool blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq);
@@ -142,6 +152,10 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
list_add(&rq->queuelist, &rq->q->queue_head);
else
list_add_tail(&rq->queuelist, &rq->q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(rq->q, rq, add_front);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
return true;
}
}
@@ -465,7 +479,15 @@ void blk_insert_flush(struct request *rq)
if (q->mq_ops)
blk_mq_sched_insert_request(rq, false, true, false, false);
else
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
{
list_add_tail(&rq->queuelist, &q->queue_head);
queue_throtl_add_request(q, rq, false);
}
#else
list_add_tail(&rq->queuelist, &q->queue_head);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
return;
}
@@ -523,7 +545,10 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
*/
if (!q->make_request_fn)
return -ENXIO;
#ifdef VENDOR_EDIT
/*jason.tang@TECH.BSP.Kernel.Storage, 2019-05-20, add to count flush*/
sysctl_blkdev_issue_flush_count++;
#endif
bio = bio_alloc(gfp_mask, 0);
bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

56
block/blk-sysfs.c Normal file → Executable file
View File

@@ -16,6 +16,10 @@
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "oppo_foreground_io_opt/oppo_foreground_io_opt.h"
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
struct queue_sysfs_entry {
struct attribute attr;
@@ -395,7 +399,22 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
return count;
}
#ifdef OPLUS_FEATURE_HEALTHINFO
// jiheng.xie@PSW.Tech.BSP.Performance, 2019/03/11
// Add for ioqueue
#ifdef CONFIG_OPPO_HEALTHINFO
static ssize_t queue_show_ohm_inflight(struct request_queue *q, char *page)
{
ssize_t ret;
ret = sprintf(page, "async:%d\n", q->in_flight[0]);
ret += sprintf(page + ret, "sync:%d\n", q->in_flight[1]);
ret += sprintf(page + ret, "bg:%d\n", q->in_flight[2]);
ret += sprintf(page + ret, "fg:%d\n", q->in_flight[3]);
return ret;
}
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
@@ -642,7 +661,16 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
.show = queue_rq_affinity_show,
.store = queue_rq_affinity_store,
};
#ifdef OPLUS_FEATURE_HEALTHINFO
// jiheng.xie@PSW.Tech.BSP.Performance, 2019/03/11
// Add for ioqueue
#ifdef CONFIG_OPPO_HEALTHINFO
static struct queue_sysfs_entry queue_ohm_inflight_entry = {
.attr = {.name = "ohm_inflight", .mode = S_IRUGO },
.show = queue_show_ohm_inflight,
};
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
static struct queue_sysfs_entry queue_iostats_entry = {
.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
.show = queue_show_iostats,
@@ -696,10 +724,27 @@ static struct queue_sysfs_entry throtl_sample_time_entry = {
.store = blk_throtl_sample_time_store,
};
#endif
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
static struct queue_sysfs_entry queue_fgio_entry = {
.attr = {.name = "fg_io_cnt_max", .mode = S_IRUGO | S_IWUSR },
.show = queue_fg_count_max_show,
.store = queue_fg_count_max_store,
};
static struct queue_sysfs_entry queue_bothio_entry = {
.attr = {.name = "both_io_cnt_max", .mode = S_IRUGO | S_IWUSR },
.show = queue_both_count_max_show,
.store = queue_both_count_max_store,
};
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
&queue_fgio_entry.attr,
&queue_bothio_entry.attr,
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr,
@@ -724,6 +769,13 @@ static struct attribute *default_attrs[] = {
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
#ifdef OPLUS_FEATURE_HEALTHINFO
// jiheng.xie@PSW.Tech.BSP.Performance, 2019/03/11
// Add for ioqueue
#ifdef CONFIG_OPPO_HEALTHINFO
&queue_ohm_inflight_entry.attr,
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
&queue_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,

View File

@@ -5,7 +5,9 @@
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
#include <linux/oppo_healthinfo/oppo_fg.h>
#endif
/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME (HZ/50UL)
@@ -18,7 +20,10 @@
#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
extern unsigned int sysctl_fg_io_opt;
extern struct request * smart_peek_request(struct request_queue *q);
#endif
struct blk_flush_queue {
unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
@@ -157,11 +162,27 @@ static inline struct request *__elv_next_request(struct request_queue *q)
WARN_ON_ONCE(q->mq_ops);
while (1) {
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
if (likely(sysctl_fg_io_opt)
#ifdef CONFIG_PM
&&(q->rpm_status == RPM_ACTIVE)
#endif
) {
rq = smart_peek_request(q);
if(rq)
return rq;
}
else {
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
if (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
return rq;
}
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
}
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
/*
* Flush request is running and flush request isn't queueable
* in the drive, we can hold the queue till flush request is

39
block/elevator.c Normal file → Executable file
View File

@@ -42,7 +42,10 @@
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "oppo_foreground_io_opt/oppo_foreground_io_opt.h"
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -415,6 +418,10 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
}
list_add(&rq->queuelist, entry);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, false);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
}
EXPORT_SYMBOL(elv_dispatch_sort);
@@ -435,6 +442,10 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
list_add_tail(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, false);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
@@ -607,6 +618,13 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
#ifdef OPLUS_FEATURE_HEALTHINFO
// jiheng.xie@PSW.Tech.BSP.Performance, 2019/03/11
// Add for ioqueue
#ifdef CONFIG_OPPO_HEALTHINFO
ohm_ioqueue_dec_inflight(q, rq);
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
if (rq->rq_flags & RQF_SORTED)
elv_deactivate_rq(q, rq);
}
@@ -639,6 +657,10 @@ void elv_drain_elevator(struct request_queue *q)
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
rq->req_ti = ktime_get();
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_rq_insert(q, rq);
blk_pm_add_request(q, rq);
@@ -661,12 +683,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
case ELEVATOR_INSERT_FRONT:
rq->rq_flags |= RQF_SOFTBARRIER;
list_add(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, true);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
break;
case ELEVATOR_INSERT_BACK:
rq->rq_flags |= RQF_SOFTBARRIER;
elv_drain_elevator(q);
list_add_tail(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPPO_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, false);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
/*
* We kick the queue here for the following reasons.
* - The elevator might have returned NULL previously
@@ -801,6 +831,13 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
#ifdef OPLUS_FEATURE_HEALTHINFO
// jiheng.xie@PSW.Tech.BSP.Performance, 2019/03/11
// Add for ioqueue
#ifdef CONFIG_OPPO_HEALTHINFO
ohm_ioqueue_dec_inflight(q, rq);
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
if ((rq->rq_flags & RQF_SORTED) &&
e->type->ops.sq.elevator_completed_req_fn)
e->type->ops.sq.elevator_completed_req_fn(q, rq);

View File

@@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2018-2020 Oplus. All rights reserved.
config OPPO_FG_IO_OPT
bool "Enable oppo foreground io optimization"
depends on FG_TASK_UID
default y
help
  Enable oppo foreground I/O optimization: block requests issued by
  foreground (user-visible) tasks are dispatched preferentially over
  background I/O.

View File

@@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2018-2020 Oplus. All rights reserved.
obj-y += oppo_high_prio_task.o oppo_foreground_io_opt.o

View File

@@ -0,0 +1,165 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/list_sort.h>
#include "oppo_foreground_io_opt.h"
#define CREATE_TRACE_POINTS
#include <trace/oppo_foreground_io_opt_trace.h>
#define FG_CNT_DEF 20
#define BOTH_CNT_DEF 10
/*
 * Initialize a queue's foreground/background dispatch budgets to their
 * compile-time defaults, with the running counters starting at full.
 */
void fg_bg_max_count_init(struct request_queue *q)
{
	q->fg_count_max = FG_CNT_DEF;
	q->both_count_max = BOTH_CNT_DEF;
	q->fg_count = q->fg_count_max;
	q->both_count = q->both_count_max;
}
/* A foreground request is queued and the FG budget is not yet spent. */
static inline bool should_get_fg_req(struct request_queue *q)
{
	return !list_empty(&q->fg_head) && q->fg_count > 0;
}
/* The shared (foreground-or-background) dispatch budget remains. */
static inline bool should_get_bg_req(struct request_queue *q)
{
	return q->both_count > 0;
}
/*
 * Pick the next request to dispatch: prefer queued foreground requests
 * while the FG budget lasts, then fall back to the normal queue head
 * while the shared budget lasts; once both budgets are exhausted,
 * refill them and take the queue head. Returns NULL when q->queue_head
 * is empty.
 */
static struct request *get_fg_bg_req(struct request_queue *q)
{
struct request *rq = NULL;
if (!list_empty(&q->queue_head)) {
if (should_get_fg_req(q)) {
/* FG request available and FG budget left: dispatch it first. */
rq = list_entry(q->fg_head.next, struct request, fg_list);
q->fg_count--;
trace_block_fg_io_peek_req(current, (long)rq,"FG\0",q->fg_count);
}
else if (should_get_bg_req(q)) {
/* Shared budget left: dispatch the normal queue head. */
rq = list_entry_rq(q->queue_head.next);
q->both_count--;
trace_block_fg_io_peek_req(current, (long)rq,"BG\0",q->both_count);
}
else {
/* Both budgets spent: refill them, then dispatch the queue head. */
q->fg_count = q->fg_count_max;
q->both_count = q->both_count_max;
rq = list_entry_rq(q->queue_head.next);
}
}
return rq;
}
/*
 * Budget-aware peek used by __elv_next_request() in place of taking
 * q->queue_head.next directly. Thin wrapper over get_fg_bg_req().
 */
struct request * smart_peek_request(struct request_queue *q)
{
return get_fg_bg_req(q);
}
/*
 * Hook invoked wherever a request is placed on q->queue_head: mirror
 * foreground-flagged (REQ_FG) requests onto q->fg_head so that
 * smart_peek_request() can prefer them. No-op when the fg-io-opt
 * sysctl is disabled or the request is not marked foreground.
 */
void queue_throtl_add_request(struct request_queue *q,
		struct request *rq, bool front)
{
	if (unlikely(!sysctl_fg_io_opt))
		return;
	if (!(rq->cmd_flags & REQ_FG))
		return;

	if (front)
		list_add(&rq->fg_list, &q->fg_head);
	else
		list_add_tail(&rq->fg_list, &q->fg_head);
}
/*blk-sys*/
/*
 * sysfs show helper: print @var in decimal. Produces empty output when
 * the fg-io-opt sysctl is disabled.
 */
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return likely(sysctl_fg_io_opt) ? sprintf(page, "%lu\n", var) : 0;
}
/*
 * sysfs store helper: parse @page as an unsigned decimal into *var.
 *
 * Returns @count on success and -EINVAL on parse/range failure. When
 * the fg-io-opt sysctl is disabled it returns 0 WITHOUT writing *var —
 * callers must not consume their output variable unless the return
 * value is positive.
 */
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
int err;
unsigned long v;
if (unlikely(!sysctl_fg_io_opt))
return 0;
err = kstrtoul(page, 10, &v);
/* Stored fields are unsigned-int sized, so reject values above UINT_MAX. */
if (err || v > UINT_MAX)
return -EINVAL;
*var = v;
return count;
}
/* sysfs show for fg_io_cnt_max (per-queue foreground dispatch budget). */
ssize_t queue_fg_count_max_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(q->fg_count_max, page);
}
/*
 * sysfs store for fg_io_cnt_max.
 *
 * Bug fix: queue_var_store() returns 0 without writing @cnt when the
 * fg-io-opt sysctl is disabled; the original guard (ret < 0) then
 * assigned the uninitialized @cnt to fg_count_max. Treat any
 * non-positive return as "nothing parsed" instead.
 */
ssize_t queue_fg_count_max_store(struct request_queue *q,
		const char *page, size_t count)
{
	unsigned long cnt;
	ssize_t ret = queue_var_store(&cnt, page, count);

	if (ret <= 0)
		return ret;
	q->fg_count_max = cnt;
	return ret;
}
/* sysfs show for both_io_cnt_max (shared FG/BG dispatch budget). */
ssize_t queue_both_count_max_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(q->both_count_max, page);
}
/*
 * sysfs store for both_io_cnt_max.
 *
 * Bug fix: as with queue_fg_count_max_store(), queue_var_store()
 * returns 0 without writing @cnt when the feature sysctl is disabled,
 * so guarding only against negative returns assigned an uninitialized
 * value. Reject any non-positive return.
 */
ssize_t queue_both_count_max_store(struct request_queue *q,
		const char *page, size_t count)
{
	unsigned long cnt;
	ssize_t ret = queue_var_store(&cnt, page, count);

	if (ret <= 0)
		return ret;
	q->both_count_max = cnt;
	return ret;
}

View File

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#ifndef __OPPO_FOREGROUND_IO_OPT__
#define __OPPO_FOREGROUND_IO_OPT__
#ifdef CONFIG_FG_TASK_UID
#include <linux/oppo_healthinfo/oppo_fg.h>
#endif /*CONFIG_FG_TASK_UID*/
extern unsigned int sysctl_fg_io_opt;
extern void fg_bg_max_count_init(struct request_queue *q);
extern void queue_throtl_add_request(struct request_queue *q,
struct request *rq, bool front);
extern ssize_t queue_fg_count_max_show(struct request_queue *q,
char *page);
extern ssize_t queue_fg_count_max_store(struct request_queue *q,
const char *page, size_t count);
extern ssize_t queue_both_count_max_show(struct request_queue *q,
char *page);
extern ssize_t queue_both_count_max_store(struct request_queue *q,
const char *page, size_t count);
extern bool high_prio_for_task(struct task_struct *t);
extern struct request * smart_peek_request(struct request_queue *q);
#endif /*__OPPO_FOREGROUND_IO_OPT__*/

View File

@@ -0,0 +1,92 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/init.h>
#include <linux/list_sort.h>
#include <linux/sched.h>
#include "oppo_foreground_io_opt.h"
#define SYSTEM_APP_UID 1000
/* Does @t run as the Android "system" user (uid SYSTEM_APP_UID == 1000)? */
static bool is_system_uid(struct task_struct *t)
{
	return task_uid(t).val == SYSTEM_APP_UID;
}
/*
 * Heuristic zygote check: a root-owned task named "main" whose parent
 * is "init".
 *
 * Cleanup: the original also computed a first_child pointer that was
 * never used, and its list-emptiness test compared t->children.next
 * against &t->children.next (the address of the next field itself,
 * which is never what an empty list head points to — an empty list
 * points back to &t->children). The dead/incorrect code and the
 * unreachable duplicate "return false" are removed; the actual
 * decision logic is unchanged.
 */
static bool is_zygote_process(struct task_struct *t)
{
	const struct cred *tcred = __task_cred(t);

	return !strcmp(t->comm, "main") &&
	       tcred->uid.val == 0 &&
	       t->parent != NULL && !strcmp(t->parent->comm, "init");
}
/*
 * True for core system-uid daemons. Names are matched as prefixes of
 * group_leader->comm, which the kernel truncates to TASK_COMM_LEN —
 * hence "ndroid.systemui" matching the truncated tail of
 * "com.android.systemui".
 */
static bool is_system_process(struct task_struct *t)
{
if (is_system_uid(t)) {
if (t->group_leader && (!strncmp(t->group_leader->comm,"system_server", 13) ||
!strncmp(t->group_leader->comm, "surfaceflinger", 14) ||
!strncmp(t->group_leader->comm, "servicemanager", 14) ||
!strncmp(t->group_leader->comm, "ndroid.systemui", 15)))
return true;
}
return false;
}
/*
 * Treat zygote and the core system-server-side tasks as critical.
 * ("critial" in the exported name is a pre-existing typo, kept because
 * external callers reference this symbol.)
 */
bool is_critial_process(struct task_struct *t)
{
	return is_zygote_process(t) || is_system_process(t);
}
/*
 * Tasks excluded from foreground boosting even when their uid is in the
 * foreground group; currently only logcat.
 */
bool is_filter_process(struct task_struct *t)
{
	return strncmp(t->comm, "logcat", TASK_COMM_LEN) == 0;
}
/*
 * Foreground-uid task that is not the system uid. When
 * CONFIG_FG_TASK_UID is disabled the is_fg() clause compiles out and
 * this degrades to "any non-system task" — presumably acceptable since
 * this feature's Kconfig depends on FG_TASK_UID; TODO confirm.
 */
static inline bool is_fg_task_without_sysuid(struct task_struct *t)
{
if(!is_system_uid(t)
#ifdef CONFIG_FG_TASK_UID
&&is_fg(task_uid(t).val)
#endif /*CONFIG_FG_TASK_UID*/
)
return true;
return false;
}
/*
 * Decide whether bios submitted by @t should carry REQ_FG priority:
 * the feature must be enabled, and @t is either a critical
 * system/zygote process, or a non-system foreground task that is not
 * on the filter list. All predicates are side-effect free, so the
 * evaluation order here is interchangeable with the original.
 */
bool high_prio_for_task(struct task_struct *t)
{
	if (!sysctl_fg_io_opt)
		return false;
	if (is_critial_process(t))
		return true;
	return is_fg_task_without_sysuid(t) && !is_filter_process(t);
}

View File

@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_INCLUDE_PATH ../../block/oppo_foreground_io_opt/trace
#define TRACE_SYSTEM oppo_foreground_io_opt_trace
#if !defined(_OPPO_FOREGROUND_IO_OPT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _OPPO_FOREGROUND_IO_OPT_TRACE_H
#include <linux/tracepoint.h>
/*trace*/
/*
 * Tracepoint fired when smart_peek_request() selects a request,
 * recording the submitter, the request address, which budget the pick
 * came from ("FG"/"BG"), and the remaining budget count.
 */
TRACE_EVENT(block_fg_io_peek_req,
	TP_PROTO(struct task_struct *task, long req_addr,
		 char *fg, int count),
	TP_ARGS(task, req_addr, fg, count),
	TP_STRUCT__entry(
		__array(char, comm, TASK_COMM_LEN)
		__field(pid_t, pid)
		__field(long, req_addr)
		__array(char, fg, 3)
		__field(int, count)
	),
	TP_fast_assign(
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
		__entry->pid = task->pid;
		__entry->req_addr = req_addr;
		memcpy(__entry->fg, fg, 3);
		__entry->count = count;
	),
	/*
	 * Bug fix: req_addr is long, so use %lx — the original %x is a
	 * format/argument mismatch that truncates on 64-bit kernels.
	 */
	TP_printk("%s (%d), req_addr %lx task_group:%s, count %d",
		__entry->comm, __entry->pid, __entry->req_addr,
		__entry->fg, __entry->count)
);
#endif /*_OPPO_FOREGROUND_IO_OPT_TRACE_H*/
#include <trace/define_trace.h>

View File

@@ -366,13 +366,15 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
pdev->devt = devt;
if (!p->policy) {
if (disk->fops->check_disk_range_wp) {
if (p->info && p->info->volname[0]
&& memcmp(p->info->volname, "otp", 3) == 0)
err = 0;
else if (disk->fops->check_disk_range_wp)
err = disk->fops->check_disk_range_wp(disk, start, len);
if (err > 0)
p->policy = 1;
else if (err != 0)
goto out_free_info;
}
if (err > 0)
p->policy = 1;
else if (err != 0)
goto out_free_info;
}
/* delay uevent until 'holders' subdir is created */

24
block/scsi_ioctl.c Normal file → Executable file
View File

@@ -212,6 +212,13 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// jason.wu@BSP.Storage, 2020-6-11
// add write buffer command for common user
// add vendor command for common user
__set_bit(WRITE_BUFFER, filter->write_ok);
__set_bit(VENDOR_SPECIFIC_CDB, filter->write_ok);
#endif
}
int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
@@ -437,6 +444,12 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
int err;
unsigned int in_len, out_len, bytes, opcode, cmdlen;
char *buffer = NULL;
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// jason.wu@BSP.Storage, 2020-03-03: workaround for Samsung devices.
// Their vendor command length is 16, not the 10 the spec table gives.
// Currently only Samsung Health issues this command.
struct scsi_device *sdev = NULL;
#endif
if (!sic)
return -EINVAL;
@@ -470,6 +483,17 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
req = scsi_req(rq);
cmdlen = COMMAND_SIZE(opcode);
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// jason.wu@BSP.Storage, 2020-03-03 workaround for samsung device.
// vendor cmd len is 16 and not 10 in spec.
// in current application ,only samsung health will use this cmd.
sdev = (struct scsi_device*)(q->queuedata);
if ((VENDOR_SPECIFIC_CDB == opcode)
&&(0 == strncmp(sdev->vendor, "SAMSUNG ", 8))
){
cmdlen = 16;
}
#endif
/*
* get command and data to send to device, if any