[SQUASH] treewide: Remove useless oplus bloat

* Drop a BUNCH of useless oplus drivers, "features", loggers
  and other useless mods.

Signed-off-by: bengris32 <bengris32@protonmail.ch>
Change-Id: I5133459af68c9cabbbfe729c417e5d903375413a
This commit is contained in:
bengris32
2023-05-05 22:57:31 +01:00
parent d7d4cb6990
commit bbec2c993e
601 changed files with 610 additions and 86617 deletions

View File

@@ -5,12 +5,6 @@
#
mainmenu "Linux/$ARCH $KERNELVERSION Kernel Configuration"
config OPLUS_FEATURE_PANIC_FLUSH
bool "fsync optimization "
default y
help
define this config to fsync optimization
config SRCARCH
string
option env="SRCARCH"

View File

@@ -459,11 +459,6 @@ KBUILD_CFLAGS += -DCONFIG_HIGH_TEMP_VERSION
endif
#endif /* OPLUS_FEATURE_CHG_BASIC */
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
ifeq ($(AGING_DEBUG_MASK),1)
# enable memleak detect daemon
OPLUS_MEMLEAK_DETECT := true
endif
ifeq ($(TARGET_MEMLEAK_DETECT_TEST),0)
# disable memleak detect daemon

View File

@@ -18,68 +18,21 @@
-include nativefeatures.mk
OPLUS_CONNECTIVITY_NATIVE_FEATURE_SET :=
ifeq ($(OPLUS_FEATURE_WIFI_MTUDETECT), yes)
OPLUS_CONNECTIVITY_NATIVE_FEATURE_SET += OPLUS_FEATURE_WIFI_MTUDETECT
endif
$(foreach myfeature,$(OPLUS_CONNECTIVITY_NATIVE_FEATURE_SET),\
$( \
$(eval KBUILD_CFLAGS += -D$(myfeature)) \
$(eval KBUILD_CPPFLAGS += -D$(myfeature)) \
$(eval CFLAGS_KERNEL += -D$(myfeature)) \
$(eval CFLAGS_MODULE += -D$(myfeature)) \
) \
)
ALLOWED_MCROS := \
OPLUS_FEATURE_HANS_FREEZE \
OPLUS_FEATURE_AOD \
OPLUS_FEATURE_HEALTHINFO \
OPLUS_FEATURE_TASK_CPUSTATS \
OPLUS_FEATURE_FG_IO_OPT \
OPLUS_FEATURE_SCHED_ASSIST \
OPLUS_FEATURE_SDCARD_INFO \
OPLUS_FEATURE_FINGERPRINT \
OPLUS_FEATURE_STORAGE_TOOL \
OPLUS_FEATURE_MULTI_FREEAREA \
OPLUS_FEATURE_MULTI_KSWAPD \
OPLUS_FEATURE_NFC_CONSOFT \
OPLUS_FEATURE_ONSCREENFINGERPRINT \
OPLUS_FEATURE_PHOENIX \
OPLUS_FEATURE_AGINGTEST \
OPLUS_FEATURE_PROCESS_RECLAIM \
OPLUS_FEATURE_SENSOR \
OPLUS_FEATURE_SENSOR_ALGORITHM \
OPLUS_FEATURE_SENSOR_SMEM \
OPLUS_FEATURE_SENSOR_WISELIGHT \
OPLUS_FEATURE_IOMONITOR \
OPLUS_FEATURE_SPEAKER_MUTE \
OPLUS_FEATURE_MM_FEEDBACK \
OPLUS_FEATURE_CHG_BASIC \
OPLUS_FEATURE_VIRTUAL_RESERVE_MEMORY \
OPLUS_FEATURE_MEMLEAK_DETECT \
OPLUS_FEATURE_WIFI_MTUDETECT \
OPLUS_FEATURE_WIFI_RUSUPGRADE \
OPLUS_FEATURE_WIFI_SLA \
OPLUS_FEATURE_DATA_EVAL \
OPLUS_FEATURE_ZRAM_OPT \
OPLUS_BUG_COMPATIBILITY \
OPLUS_FEATURE_MIDAS \
OPLUS_BUG_STABILITY \
OPLUS_ARCH_INJECT \
OPLUS_ARCH_EXTENDS \
OPLUS_FEATURE_LOWMEM_DBG \
OPLUS_FEATURE_PERFORMANCE \
OPLUS_FEATURE_MTK_ION_SEPARATE_LOCK \
OPLUS_FEATURE_ALARMINFO_STANDBY \
OPLUS_FEATURE_POWERINFO_FTM \
OPLUS_FEATURE_SCHEDUTIL_USE_TL \
OPLUS_FEATURE_CORE_CTL \
OPLUS_FEATURE_CAMERA_COMMON \
OPLUS_FEATURE_WIFI_OPLUSWFD
OPLUS_FEATURE_CAMERA_COMMON
$(foreach myfeature,$(ALLOWED_MCROS),\
$(eval KBUILD_CFLAGS += -D$(myfeature)) \
@@ -88,15 +41,6 @@ $(foreach myfeature,$(ALLOWED_MCROS),\
$(eval CFLAGS_MODULE += -D$(myfeature)) \
)
# BSP team can do customzation by referring the feature variables
ifeq ($(OPLUS_FEATURE_PREFER_SILVER),yes)
export CONFIG_OPLUS_PREFER_SILVER=y
KBUILD_CFLAGS += -DCONFIG_OPLUS_PREFER_SILVER
KBUILD_CPPFLAGS += -DCONFIG_OPLUS_PREFER_SILVER
CFLAGS_KERNEL += -DCONFIG_OPLUS_PREFER_SILVER
CFLAGS_MODULE += -DCONFIG_OPLUS_PREFER_SILVER
endif
#Zhijun.Ye@MM.Display.LCD.Machine, 2020/09/23, add for multibits backlight
ifeq ($(OPLUS_FEATURE_MULTIBITS_BL),yes)
KBUILD_CFLAGS += -DOPLUS_FEATURE_MULTIBITS_BL

View File

@@ -46,9 +46,6 @@
#include <asm/tlbflush.h>
#include <acpi/ghes.h>
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
#include <linux/iomonitor/iomonitor.h>
#endif /*OPLUS_FEATURE_IOMONITOR*/
struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr,
@@ -529,9 +526,6 @@ done:
*/
if (major) {
tsk->maj_flt++;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_fs_stats(FS_MAJOR_FAULT, 1);
#endif /*OPLUS_FEATURE_IOMONITOR*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
addr);
} else {

View File

@@ -230,8 +230,3 @@ config BLK_MQ_RDMA
default y
source block/Kconfig.iosched
#ifdef OPLUS_FEATURE_FG_IO_OPT
#/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
source block/foreground_io_opt/Kconfig
#endif

View File

@@ -39,7 +39,3 @@ obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \
blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
#ifdef OPLUS_FEATURE_FG_IO_OPT
#/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
obj-$(CONFIG_OPLUS_FG_IO_OPT) += foreground_io_opt/
#endif /*OPLUS_FEATURE_FG_IO_OPT*/

View File

@@ -45,18 +45,11 @@
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "foreground_io_opt/foreground_io_opt.h"
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
#include <linux/iomonitor/iomonitor.h>
#endif /*OPLUS_FEATURE_IOMONITOR*/
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -126,10 +119,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
INIT_LIST_HEAD(&rq->fg_list);
#endif
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
@@ -848,10 +837,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
INIT_LIST_HEAD(&q->fg_head);
#endif
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
@@ -873,10 +858,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
fg_bg_max_count_init(q);
#endif
setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
@@ -1320,9 +1301,6 @@ out:
*/
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_init_reqstats(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_getrq(q, bio, op);
return rq;
@@ -1814,11 +1792,6 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
if (bio->bi_opf & REQ_FG)
req->cmd_flags |= REQ_FG;
#endif
req->__sector = bio->bi_iter.bi_sector;
if (ioprio_valid(bio_prio(bio)))
req->ioprio = bio_prio(bio);
@@ -2315,17 +2288,11 @@ blk_qc_t submit_bio(struct bio *bio)
if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGOUT, count);
#endif/*OPLUS_FEATURE_IOMONITOR*/
} else {
if (bio_flagged(bio, BIO_WORKINGSET))
workingset_read = true;
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGIN, count);
#endif/*OPLUS_FEATURE_IOMONITOR*/
}
if (unlikely(block_dump)) {
@@ -2337,11 +2304,6 @@ blk_qc_t submit_bio(struct bio *bio)
bio_devname(bio, b), count);
}
}
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
if (high_prio_for_task(current))
bio->bi_opf |= REQ_FG;
#endif
/*
* If we're reading data that is part of the userspace
* workingset, count submission time as memory stall. When the
@@ -2634,9 +2596,6 @@ struct request *blk_peek_request(struct request_queue *q)
* not be passed by new incoming requests
*/
rq->rq_flags |= RQF_STARTED;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
rq->req_td = ktime_get();
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_rq_issue(q, rq);
}
@@ -2696,9 +2655,6 @@ struct request *blk_peek_request(struct request_queue *q)
break;
}
}
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_io_history(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
return rq;
}
EXPORT_SYMBOL(blk_peek_request);
@@ -2711,11 +2667,6 @@ static void blk_dequeue_request(struct request *rq)
BUG_ON(ELV_ON_HASH(rq));
list_del_init(&rq->queuelist);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
if (sysctl_fg_io_opt && (rq->cmd_flags & REQ_FG))
list_del_init(&rq->fg_list);
#endif
/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
@@ -2725,10 +2676,6 @@ static void blk_dequeue_request(struct request *rq)
q->in_flight[rq_is_sync(rq)]++;
set_io_start_time_ns(rq);
}
#if defined(OPLUS_FEATURE_HEALTHINFO) && defined(CONFIG_OPLUS_HEALTHINFO)
// Add for ioqueue
ohm_ioqueue_add_inflight(q, rq);
#endif /*OPLUS_FEATURE_HEALTHINFO*/
}
/**
@@ -2811,9 +2758,6 @@ bool blk_update_request(struct request *req, blk_status_t error,
int total_bytes;
trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_reqstats(req, nr_bytes);
#endif /*OPLUS_FEATURE_IOMONITOR*/
if (!req->bio)
return false;

View File

@@ -76,10 +76,6 @@
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "foreground_io_opt/foreground_io_opt.h"
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
/* PREFLUSH/FUA sequences */
enum {
@@ -147,10 +143,6 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
list_add(&rq->queuelist, &rq->q->queue_head);
else
list_add_tail(&rq->queuelist, &rq->q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(rq->q, rq, add_front);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
return true;
}
}
@@ -474,15 +466,7 @@ void blk_insert_flush(struct request *rq)
if (q->mq_ops)
blk_mq_sched_insert_request(rq, false, true, false, false);
else
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
{
list_add_tail(&rq->queuelist, &q->queue_head);
queue_throtl_add_request(q, rq, false);
}
#else
list_add_tail(&rq->queuelist, &q->queue_head);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
return;
}

View File

@@ -16,10 +16,6 @@
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "foreground_io_opt/foreground_io_opt.h"
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
struct queue_sysfs_entry {
struct attribute attr;
@@ -400,19 +396,6 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
return count;
}
#if defined(OPLUS_FEATURE_HEALTHINFO) && defined(CONFIG_OPLUS_HEALTHINFO)
// Add for ioqueue
static ssize_t queue_show_ohm_inflight(struct request_queue *q, char *page)
{
ssize_t ret;
ret = sprintf(page, "async:%d\n", q->in_flight[0]);
ret += sprintf(page + ret, "sync:%d\n", q->in_flight[1]);
ret += sprintf(page + ret, "bg:%d\n", q->in_flight[2]);
ret += sprintf(page + ret, "fg:%d\n", q->in_flight[3]);
return ret;
}
#endif /*OPLUS_FEATURE_HEALTHINFO*/
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
@@ -667,13 +650,6 @@ static struct queue_sysfs_entry queue_iostats_entry = {
.store = queue_store_iostats,
};
#if defined(OPLUS_FEATURE_HEALTHINFO) && defined(CONFIG_OPLUS_HEALTHINFO)
// Add for ioqueue
static struct queue_sysfs_entry queue_ohm_inflight_entry = {
.attr = {.name = "ohm_inflight", .mode = S_IRUGO },
.show = queue_show_ohm_inflight,
};
#endif /*OPLUS_FEATURE_HEALTHINFO*/
static struct queue_sysfs_entry queue_random_entry = {
.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
@@ -722,27 +698,9 @@ static struct queue_sysfs_entry throtl_sample_time_entry = {
.store = blk_throtl_sample_time_store,
};
#endif
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
static struct queue_sysfs_entry queue_fgio_entry = {
.attr = {.name = "fg_io_cnt_max", .mode = S_IRUGO | S_IWUSR },
.show = queue_fg_count_max_show,
.store = queue_fg_count_max_store,
};
static struct queue_sysfs_entry queue_bothio_entry = {
.attr = {.name = "both_io_cnt_max", .mode = S_IRUGO | S_IWUSR },
.show = queue_both_count_max_show,
.store = queue_both_count_max_store,
};
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
&queue_fgio_entry.attr,
&queue_bothio_entry.attr,
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr,
@@ -767,10 +725,6 @@ static struct attribute *default_attrs[] = {
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
#if defined(OPLUS_FEATURE_HEALTHINFO) && defined(CONFIG_OPLUS_HEALTHINFO)
// Add for ioqueue
&queue_ohm_inflight_entry.attr,
#endif /*OPLUS_FEATURE_HEALTHINFO*/
&queue_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,

View File

@@ -5,9 +5,6 @@
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
#include <linux/healthinfo/fg.h>
#endif
/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME (HZ/50UL)
@@ -20,10 +17,6 @@
#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
extern unsigned int sysctl_fg_io_opt;
extern struct request * smart_peek_request(struct request_queue *q);
#endif
struct blk_flush_queue {
unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
@@ -162,27 +155,10 @@ static inline struct request *__elv_next_request(struct request_queue *q)
WARN_ON_ONCE(q->mq_ops);
while (1) {
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
if (likely(sysctl_fg_io_opt)
#ifdef CONFIG_PM
&&(q->rpm_status == RPM_ACTIVE)
#endif
) {
rq = smart_peek_request(q);
if(rq)
return rq;
}
else {
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
if (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
return rq;
}
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
}
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
/*
* Flush request is running and flush request isn't queueable
* in the drive, we can hold the queue till flush request is

View File

@@ -42,10 +42,6 @@
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
#include "foreground_io_opt/foreground_io_opt.h"
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -418,10 +414,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
}
list_add(&rq->queuelist, entry);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, false);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
}
EXPORT_SYMBOL(elv_dispatch_sort);
@@ -442,10 +434,6 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
list_add_tail(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, false);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
@@ -618,10 +606,6 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
#if defined(OPLUS_FEATURE_HEALTHINFO) && defined(CONFIG_OPLUS_HEALTHINFO)
// Add for ioqueue
ohm_ioqueue_dec_inflight(q, rq);
#endif /*OPLUS_FEATURE_HEALTHINFO*/
if (rq->rq_flags & RQF_SORTED)
elv_deactivate_rq(q, rq);
}
@@ -654,9 +638,6 @@ void elv_drain_elevator(struct request_queue *q)
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
rq->req_ti = ktime_get();
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_rq_insert(q, rq);
@@ -680,20 +661,12 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
case ELEVATOR_INSERT_FRONT:
rq->rq_flags |= RQF_SOFTBARRIER;
list_add(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, true);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
break;
case ELEVATOR_INSERT_BACK:
rq->rq_flags |= RQF_SOFTBARRIER;
elv_drain_elevator(q);
list_add_tail(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_FG_IO_OPT) && defined(CONFIG_OPLUS_FG_IO_OPT)
/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-23,add foreground io opt*/
queue_throtl_add_request(q, rq, false);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
/*
* We kick the queue here for the following reasons.
* - The elevator might have returned NULL previously
@@ -828,10 +801,6 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
#if defined(OPLUS_FEATURE_HEALTHINFO) && defined(CONFIG_OPLUS_HEALTHINFO)
// Add for ioqueue
ohm_ioqueue_dec_inflight(q, rq);
#endif /*OPLUS_FEATURE_HEALTHINFO*/
if ((rq->rq_flags & RQF_SORTED) &&
e->type->ops.sq.elevator_completed_req_fn)
e->type->ops.sq.elevator_completed_req_fn(q, rq);

View File

@@ -1,9 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2018-2020 Oplus. All rights reserved.
config OPLUS_FG_IO_OPT
bool "Enable foreground io optimization"
depends on FG_TASK_UID
default y
help
foreground io optimization

View File

@@ -1,4 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2018-2020 Oplus. All rights reserved.
obj-y += high_prio_task.o foreground_io_opt.o

View File

@@ -1,165 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/list_sort.h>
#include "foreground_io_opt.h"
#define CREATE_TRACE_POINTS
#include <trace/foreground_io_opt_trace.h>
#define FG_CNT_DEF 20
#define BOTH_CNT_DEF 10
void fg_bg_max_count_init(struct request_queue *q)
{
q->fg_count_max = FG_CNT_DEF;
q->both_count_max = BOTH_CNT_DEF;
q->fg_count = FG_CNT_DEF;
q->both_count = BOTH_CNT_DEF;
}
static inline bool should_get_fg_req(struct request_queue *q)
{
if (!list_empty(&q->fg_head)
&& (q->fg_count > 0))
return true;
return false;
}
static inline bool should_get_bg_req(struct request_queue *q)
{
if (q->both_count > 0)
return true;
return false;
}
static struct request *get_fg_bg_req(struct request_queue *q)
{
struct request *rq = NULL;
if (!list_empty(&q->queue_head)) {
if (should_get_fg_req(q)) {
rq = list_entry(q->fg_head.next, struct request, fg_list);
q->fg_count--;
trace_block_fg_io_peek_req(current, (long)rq,"FG\0",q->fg_count);
}
else if (should_get_bg_req(q)) {
rq = list_entry_rq(q->queue_head.next);
q->both_count--;
trace_block_fg_io_peek_req(current, (long)rq,"BG\0",q->both_count);
}
else {
q->fg_count = q->fg_count_max;
q->both_count = q->both_count_max;
rq = list_entry_rq(q->queue_head.next);
}
}
return rq;
}
struct request * smart_peek_request(struct request_queue *q)
{
return get_fg_bg_req(q);
}
void queue_throtl_add_request(struct request_queue *q,
struct request *rq, bool front)
{
struct list_head *head;
if (unlikely(!sysctl_fg_io_opt))
return;
if (rq->cmd_flags & REQ_FG) {
head = &q->fg_head;
if (front)
list_add(&rq->fg_list, head);
else
list_add_tail(&rq->fg_list, head);
}
}
/*blk-sys*/
static ssize_t
queue_var_show(unsigned long var, char *page)
{
if (unlikely(!sysctl_fg_io_opt))
return 0;
return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
int err;
unsigned long v;
if (unlikely(!sysctl_fg_io_opt))
return 0;
err = kstrtoul(page, 10, &v);
if (err || v > UINT_MAX)
return -EINVAL;
*var = v;
return count;
}
ssize_t queue_fg_count_max_show(struct request_queue *q,
char *page)
{
int cnt = q->fg_count_max;
return queue_var_show(cnt, (page));
}
ssize_t queue_fg_count_max_store(struct request_queue *q,
const char *page, size_t count)
{
unsigned long cnt;
ssize_t ret = queue_var_store(&cnt, page, count);
if (ret < 0)
return ret;
q->fg_count_max= cnt;
return ret;
}
ssize_t queue_both_count_max_show(struct request_queue *q,
char *page)
{
int cnt = q->both_count_max;
return queue_var_show(cnt, (page));
}
ssize_t queue_both_count_max_store(struct request_queue *q,
const char *page, size_t count)
{
unsigned long cnt;
ssize_t ret = queue_var_store(&cnt, page, count);
if (ret < 0)
return ret;
q->both_count_max= cnt;
return ret;
}

View File

@@ -1,28 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#ifndef __OPLUS_FOREGROUND_IO_OPT__
#define __OPLUS_FOREGROUND_IO_OPT__
#ifdef CONFIG_FG_TASK_UID
#include <linux/healthinfo/fg.h>
#endif /*CONFIG_FG_TASK_UID*/
extern unsigned int sysctl_fg_io_opt;
extern void fg_bg_max_count_init(struct request_queue *q);
extern void queue_throtl_add_request(struct request_queue *q,
struct request *rq, bool front);
extern ssize_t queue_fg_count_max_show(struct request_queue *q,
char *page);
extern ssize_t queue_fg_count_max_store(struct request_queue *q,
const char *page, size_t count);
extern ssize_t queue_both_count_max_show(struct request_queue *q,
char *page);
extern ssize_t queue_both_count_max_store(struct request_queue *q,
const char *page, size_t count);
extern bool high_prio_for_task(struct task_struct *t);
extern struct request * smart_peek_request(struct request_queue *q);
#endif /*__OPLUS_FOREGROUND_IO_OPT__*/

View File

@@ -1,92 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/init.h>
#include <linux/list_sort.h>
#include <linux/sched.h>
#include "foreground_io_opt.h"
#define SYSTEM_APP_UID 1000
static bool is_system_uid(struct task_struct *t)
{
int cur_uid;
cur_uid = task_uid(t).val;
if (cur_uid == SYSTEM_APP_UID)
return true;
return false;
}
static bool is_zygote_process(struct task_struct *t)
{
const struct cred *tcred = __task_cred(t);
struct task_struct * first_child = NULL;
if(t->children.next && t->children.next != (struct list_head*)&t->children.next)
first_child = container_of(t->children.next, struct task_struct, sibling);
if(!strcmp(t->comm, "main") && (tcred->uid.val == 0) && (t->parent != 0 && !strcmp(t->parent->comm,"init")) )
return true;
else
return false;
return false;
}
static bool is_system_process(struct task_struct *t)
{
if (is_system_uid(t)) {
if (t->group_leader && (!strncmp(t->group_leader->comm,"system_server", 13) ||
!strncmp(t->group_leader->comm, "surfaceflinger", 14) ||
!strncmp(t->group_leader->comm, "servicemanager", 14) ||
!strncmp(t->group_leader->comm, "ndroid.systemui", 15)))
return true;
}
return false;
}
bool is_critial_process(struct task_struct *t)
{
if( is_zygote_process(t) || is_system_process(t))
return true;
return false;
}
bool is_filter_process(struct task_struct *t)
{
if(!strncmp(t->comm,"logcat", TASK_COMM_LEN) )
return true;
return false;
}
static inline bool is_fg_task_without_sysuid(struct task_struct *t)
{
if(!is_system_uid(t)
#ifdef CONFIG_FG_TASK_UID
&&is_fg(task_uid(t).val)
#endif /*CONFIG_FG_TASK_UID*/
)
return true;
return false;
}
bool high_prio_for_task(struct task_struct *t)
{
if (!sysctl_fg_io_opt)
return false;
if ((is_fg_task_without_sysuid(t) && !is_filter_process(t))
|| is_critial_process(t))
return true;
return false;
}

View File

@@ -1,43 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_INCLUDE_PATH ../../block/foreground_io_opt/trace
#define TRACE_SYSTEM foreground_io_opt_trace
#if !defined(_OPLUS_FOREGROUND_IO_OPT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _OPLUS_FOREGROUND_IO_OPT_TRACE_H
#include <linux/tracepoint.h>
/*trace*/
TRACE_EVENT(block_fg_io_peek_req,
TP_PROTO(struct task_struct *task, long req_addr, \
char * fg, int count),
TP_ARGS(task, req_addr, fg, count),
TP_STRUCT__entry(
__array(char, comm, TASK_COMM_LEN)
__field(pid_t, pid)
__field(long, req_addr)
__array(char, fg, 3)
__field(int, count)
),
TP_fast_assign(
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
__entry->pid = task->pid;
__entry->req_addr = req_addr;
memcpy(__entry->fg, fg, 3);
__entry->count = count;
),
TP_printk("%s (%d), req_addr %x task_group:%s, count %d",
__entry->comm, __entry->pid, __entry->req_addr,
__entry->fg, __entry->count)
);
#endif /*_OPLUS_FOREGROUND_IO_OPT_TRACE_H*/
#include <trace/define_trace.h>

View File

@@ -212,12 +212,6 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// add write buffer command for common user
// add vendor command for common user
__set_bit(WRITE_BUFFER, filter->write_ok);
__set_bit(VENDOR_SPECIFIC_CDB, filter->write_ok);
#endif
}
int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
@@ -444,11 +438,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
unsigned int in_len, out_len, bytes, opcode, cmdlen;
char *buffer = NULL;
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// vendor cmd len is 16 and not 10 in spec.
// in current application ,only samsung health will use this cmd.
struct scsi_device *sdev = NULL;
#endif
if (!sic)
return -EINVAL;
@@ -482,16 +471,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
cmdlen = COMMAND_SIZE(opcode);
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// vendor cmd len is 16 and not 10 in spec.
// in current application ,only samsung health will use this cmd.
sdev = (struct scsi_device*)(q->queuedata);
if ((VENDOR_SPECIFIC_CDB == opcode)
&&(0 == strncmp(sdev->vendor, "SAMSUNG ", 8))
){
cmdlen = 16;
}
#endif
/*
* get command and data to send to device, if any
*/

File diff suppressed because it is too large Load Diff

View File

@@ -37,10 +37,6 @@
#endif
#include "binder_alloc.h"
#include "binder_trace.h"
#ifdef OPLUS_FEATURE_HANS_FREEZE
//#Kun.Zhou@ANDROID.RESCONTROL, 2019/09/23, add for hans freeze manager
#include <linux/hans.h>
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
struct list_lru binder_alloc_lru;
@@ -72,20 +68,6 @@ static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
//ifdef OPLUS_BUG_STABILITY
size_t binder_alloc_buffer_size_locked(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
size_t buffer_size;
mutex_lock(&alloc->mutex);
if (list_is_last(&buffer->entry, &alloc->buffers))
buffer_size = alloc->buffer + alloc->buffer_size - buffer->user_data;
else
buffer_size = binder_buffer_next(buffer)->user_data - buffer->user_data;
mutex_unlock(&alloc->mutex);
return buffer_size;
}
//endif /*OPLUS_BUG_STABILITY*/
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
@@ -452,10 +434,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
void __user *end_page_addr;
size_t size, data_offsets_size;
int ret;
#ifdef OPLUS_FEATURE_HANS_FREEZE
//#Kun.Zhou@ANDROID.RESCONTROL, 2019/09/23, add for hans freeze manager
struct task_struct *p = NULL;
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
if (!binder_alloc_get_vma(alloc)) {
pr_err("%d: binder_alloc_buf, no vma\n",
@@ -479,19 +457,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
alloc->pid, extra_buffers_size);
return ERR_PTR(-EINVAL);
}
#ifdef OPLUS_FEATURE_HANS_FREEZE
//#Kun.Zhou@ANDROID.RESCONTROL, 2019/09/23, add for hans freeze manager
if (is_async
&& (alloc->free_async_space < 3 * (size + sizeof(struct binder_buffer))
|| (alloc->free_async_space < ((alloc->buffer_size / 2) * 9 / 10)))) {
rcu_read_lock();
p = find_task_by_vpid(alloc->pid);
rcu_read_unlock();
if (p != NULL && is_frozen_tg(p)) {
hans_report(ASYNC_BINDER, task_tgid_nr(current), task_uid(current).val, task_tgid_nr(p), task_uid(p).val, "free_buffer_full", -1);
}
}
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
if (is_async &&
alloc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -534,13 +499,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
total_alloc_size += buffer_size;
if (buffer_size > largest_alloc_size)
largest_alloc_size = buffer_size;
//ifdef OPLUS_BUG_STABILITY
if(buffer_size > 100 * SZ_1K) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf size %zd \n",
buffer->pid, buffer_size);
}
//endif /*OPLUS_BUG_STABILITY*/
}
for (n = rb_first(&alloc->free_buffers); n != NULL;
n = rb_next(n)) {

View File

@@ -24,12 +24,6 @@
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>
//ifdef OPLUS_BUG_STABILITY
#ifndef SZ_1K
#define SZ_1K 0x400
#endif
//endif /*OPLUS_BUG_STABILITY*/
extern struct list_lru binder_alloc_lru;
struct binder_transaction;
@@ -153,10 +147,6 @@ extern void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc);
//ifdef OPLUS_BUG_STABILITY
size_t binder_alloc_buffer_size_locked(struct binder_alloc *alloc,
struct binder_buffer *buffer);
//endif /*OPLUS_BUG_STABILITY*/
/**
* binder_alloc_get_free_async_space() - get free space available for async
* @alloc: binder_alloc for this proc

View File

@@ -31,16 +31,6 @@ config ZRAM_WRITEBACK
See Documentation/blockdev/zram.txt for more information.
#ifdef OPLUS_FEATURE_ZRAM_OPT
#/*Huacai.Zhou@Tech.Kernel.MM, 2020-03-21, add zram opt support*/
config OPLUS_ZRAM_OPT
bool "oplus zram optimization"
depends on ZRAM
default y
help
oplus zram optimization
#endif /*OPLUS_FEATURE_ZRAM_OPT*/
config ZRAM_MEMORY_TRACKING
bool "Track zRam block status"
depends on ZRAM && DEBUG_FS
@@ -50,41 +40,3 @@ config ZRAM_MEMORY_TRACKING
/sys/kernel/debug/zram/zramX/block_state.
See Documentation/blockdev/zram.txt for more information.
config HYBRIDSWAP
bool "Enable Hybridswap"
depends on MEMCG && ZRAM && !ZRAM_DEDUP && !ZRAM_WRITEBACK && !ZWB_HANDLE
default n
help
Hybridswap is a intelligent memory management solution.
config HYBRIDSWAP_SWAPD
bool "Enable hybridswap swapd thread to reclaim anon pages in background"
default n
depends on HYBRIDSWAP
help
swapd is a kernel thread that reclaim anonymous pages in the
background. When the use of swap pages reaches the watermark
and the refault of anonymous pages is high, the content of
zram will exchanged to eswap by a certain percentage.
# Selected when system need hybridswap container
config HYBRIDSWAP_CORE
bool "Hybridswap container device support"
depends on ZRAM && HYBRIDSWAP
default n
help
Say Y here if you want to use the hybridswap
as the backend device in ZRAM.
If unsure, say N here.
This module can't be compiled as a module,
the module is as one part of the ZRAM driver.
config HYBRIDSWAP_ASYNC_COMPRESS
bool "hypbridswap support asynchronous compress anon pages"
depends on ZRAM && HYBRIDSWAP
default n
help
Say Y here if you want to create asynchronous thread
for compress anon pages.
If unsure, say N here.
This feature will reduce the kswapd cpu load.

View File

@@ -2,8 +2,3 @@
zram-y := zcomp.o zram_drv.o
obj-$(CONFIG_ZRAM) += zram.o
zram-$(CONFIG_HYBRIDSWAP) += hybridswap/hybridswap_main.o
zram-$(CONFIG_HYBRIDSWAP_SWAPD) += hybridswap/hybridswap_swapd.o
zram-$(CONFIG_HYBRIDSWAP_ASYNC_COMPRESS) += hybridswap/hybridswap_akcompress.o
zram-$(CONFIG_HYBRIDSWAP_CORE) += hybridswap/hybridswap_area.o hybridswap/hybridswap_core.o hybridswap/hybridswap_ctrl.o hybridswap/hybridswap_list.o hybridswap/hybridswap_lru_rmap.o hybridswap/hybridswap_manager.o hybridswap/hybridswap_perf.o hybridswap/hybridswap_schedule.o hybridswap/hybridswap_stats.o

View File

@@ -1,98 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#ifndef HYBRIDSWAP_H
#define HYBRIDSWAP_H
extern int __init hybridswap_pre_init(void);
extern ssize_t hybridswap_vmstat_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_loglevel_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_loglevel_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_enable_show(struct device *dev,
struct device_attribute *attr, char *buf);
#ifdef CONFIG_HYBRIDSWAP_CORE
extern void hybridswap_track(struct zram *zram, u32 index, struct mem_cgroup *memcg);
extern void hybridswap_untrack(struct zram *zram, u32 index);
extern int hybridswap_fault_out(struct zram *zram, u32 index);
extern bool hybridswap_delete(struct zram *zram, u32 index);
extern ssize_t hybridswap_report_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_stat_snap_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_meminfo_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_core_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_core_enable_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_loop_device_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_loop_device_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_dev_life_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_dev_life_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_quota_day_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_quota_day_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_zram_increase_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_zram_increase_show(struct device *dev,
struct device_attribute *attr, char *buf);
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
/* 63---48,47--32,31-0 : cgroup id, thread_idx, index*/
#define ZRAM_INDEX_SHIFT 32
#define CACHE_INDEX_SHIFT 32
#define CACHE_INDEX_MASK ((1llu << CACHE_INDEX_SHIFT) - 1)
#define ZRAM_INDEX_MASK ((1llu << ZRAM_INDEX_SHIFT) - 1)
#define cache_idx_val(idx) (((unsigned long)idx & CACHE_INDEX_MASK) << ZRAM_INDEX_SHIFT)
#define zram_idx_val(id) ((unsigned long)id & ZRAM_INDEX_MASK)
#define mk_page_val(cache_idx, index) (cache_idx_val(cache_idx) | zram_idx_val(index))
#define get_cache_id(page) ((page->private >> 32) & CACHE_INDEX_MASK)
#define get_zram_index(page) (page->private & ZRAM_INDEX_MASK)
#define zram_set_page(zram, index, page) (zram->table[index].page = page)
#define zram_get_page(zram, index) (zram->table[index].page)
extern void del_page_from_cache(struct page *page);
extern int add_anon_page2cache(struct zram * zram, u32 index,
struct page *page);
extern ssize_t hybridswap_akcompress_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_akcompress_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern void put_free_page(struct page *page);
extern void put_anon_pages(struct page *page);
extern int akcompress_cache_fault_out(struct zram *zram,
struct page *page, u32 index);
extern void destroy_akcompressd_task(struct zram *zram);
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
extern ssize_t hybridswap_swapd_pause_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_swapd_pause_show(struct device *dev,
struct device_attribute *attr, char *buf);
#endif
static inline bool current_is_swapd(void)
{
#ifdef CONFIG_HYBRIDSWAP_SWAPD
return (strncmp(current->comm, "hybridswapd:", sizeof("hybridswapd:") - 1) == 0);
#else
return false;
#endif
}
#endif /* HYBRIDSWAP_H */

View File

@@ -1,575 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/atomic.h>
#include <linux/idr.h>
#include <linux/freezer.h>
#include "../zram_drv.h"
#include "../zram_drv_internal.h"
#include "hybridswap_internal.h"
#include "hybridswap.h"
struct compress_info_s {
struct list_head free_page_head;
spinlock_t free_lock;
unsigned int free_cnt;
unsigned int max_cnt;
} compress_info;
#define MAX_AKCOMPRESSD_THREADS 4
#define DEFAULT_CACHE_SIZE_MB 64
#define DEFAULT_COMPRESS_BATCH_MB 1
#define DEFAULT_CACHE_COUNT ((DEFAULT_CACHE_SIZE_MB << 20) >> PAGE_SHIFT)
#define WAKEUP_AKCOMPRESSD_WATERMARK ((DEFAULT_COMPRESS_BATCH_MB << 20) >> PAGE_SHIFT)
static wait_queue_head_t akcompressd_wait;
static struct task_struct *akc_task[MAX_AKCOMPRESSD_THREADS];
static atomic64_t akc_cnt[MAX_AKCOMPRESSD_THREADS];
static int akcompressd_threads = 0;
static atomic64_t cached_cnt;
static struct zram *zram_info;
static DEFINE_MUTEX(akcompress_init_lock);
struct idr cached_idr = IDR_INIT(cached_idr);
DEFINE_SPINLOCK(cached_idr_lock);
static void wake_all_akcompressd(void);
void clear_page_memcg(struct cgroup_cache_page *cache)
{
struct list_head *pos;
struct page *page;
spin_lock(&cache->lock);
if (list_empty(&cache->head))
goto out;
list_for_each(pos, &cache->head) {
page = list_entry(pos, struct page, lru);
if (!page->mem_cgroup)
BUG();
page->mem_cgroup = NULL;
}
out:
cache->dead = 1;
spin_unlock(&cache->lock);
}
static inline struct page * get_free_page(void)
{
struct page *page = NULL;
spin_lock(&compress_info.free_lock);
if (compress_info.free_cnt > 0) {
if (list_empty(&compress_info.free_page_head))
BUG();
page = lru_to_page(&compress_info.free_page_head);
list_del(&page->lru);
compress_info.free_cnt--;
}
spin_unlock(&compress_info.free_lock);
return page;
}
void put_free_page(struct page *page)
{
set_page_private(page, 0);
spin_lock(&compress_info.free_lock);
list_add_tail(&page->lru, &compress_info.free_page_head);
compress_info.free_cnt++;
spin_unlock(&compress_info.free_lock);
}
static inline struct cgroup_cache_page *find_and_get_memcg_cache(int cache_id)
{
struct cgroup_cache_page *cache;
spin_lock(&cached_idr_lock);
cache = (struct cgroup_cache_page *)idr_find(&cached_idr, cache_id);
if (unlikely(!cache)) {
spin_unlock(&cached_idr_lock);
pr_err("cache_id %d cache not find.\n", cache_id);
return NULL;
}
get_memcg_cache(container_of(cache, memcg_hybs_t, cache));
spin_unlock(&cached_idr_lock);
return cache;
}
void del_page_from_cache(struct page *page)
{
int cache_id;
struct cgroup_cache_page *cache;
if (!page)
return;
cache_id = get_cache_id(page);
if (unlikely(cache_id < 0 || cache_id > MEM_CGROUP_ID_MAX)) {
hybp(HYB_ERR, "page %p cache_id %d index %u is invalid.\n",
page, cache_id, get_zram_index(page));
return;
}
cache = find_and_get_memcg_cache(cache_id);
if (!cache)
return;
spin_lock(&cache->lock);
list_del(&page->lru);
cache->cnt--;
spin_unlock(&cache->lock);
put_memcg_cache(container_of(cache, memcg_hybs_t, cache));
atomic64_dec(&cached_cnt);
}
void del_page_from_cache_with_cache(struct page *page,
struct cgroup_cache_page *cache)
{
spin_lock(&cache->lock);
list_del(&page->lru);
cache->cnt--;
spin_unlock(&cache->lock);
atomic64_dec(&cached_cnt);
}
void put_anon_pages(struct page *page)
{
memcg_hybs_t *hybs = MEMCGRP_ITEM_DATA(page->mem_cgroup);
spin_lock(&hybs->cache.lock);
list_add(&page->lru, &hybs->cache.head);
hybs->cache.cnt++;
spin_unlock(&hybs->cache.lock);
}
static inline bool can_stop_working(struct cgroup_cache_page *cache, int idx)
{
spin_lock(&cache->lock);
if (unlikely(!list_empty(&cache->head))) {
spin_unlock(&cache->lock);
return false;
}
spin_unlock(&cache->lock);
return 1;
}
static int check_cache_state(struct cgroup_cache_page *cache)
{
if (cache->cnt == 0 || cache->compressing == 1)
return 0;
spin_lock(&cache->lock);
if (cache->cnt == 0 || cache->compressing) {
spin_unlock(&cache->lock);
return 0;
}
cache->compressing = 1;
spin_unlock(&cache->lock);
get_memcg_cache(container_of(cache, memcg_hybs_t, cache));
return 1;
}
struct cgroup_cache_page *fetch_one_cache(void)
{
struct cgroup_cache_page *cache = NULL;
int id;
spin_lock(&cached_idr_lock);
idr_for_each_entry(&cached_idr, cache, id) {
if (check_cache_state(cache))
break;
}
spin_unlock(&cached_idr_lock);
return cache;
}
void mark_compressing_stop(struct cgroup_cache_page *cache)
{
spin_lock(&cache->lock);
if (cache->dead)
hybp(HYB_WARN, "stop compressing, may be cgroup is delelted\n");
cache->compressing = 0;
spin_unlock(&cache->lock);
put_memcg_cache(container_of(cache, memcg_hybs_t, cache));
}
static inline struct page *get_anon_page(struct zram *zram,
struct cgroup_cache_page *cache)
{
struct page *page, *prev_page;
int index;
if (compress_info.free_cnt == 0)
return NULL;
prev_page = NULL;
try_again:
page = NULL;
spin_lock(&cache->lock);
if (!list_empty(&cache->head)) {
page = lru_to_page(&cache->head);
index = get_zram_index(page);
}
spin_unlock(&cache->lock);
if (page) {
if (prev_page && (page == prev_page)) {
hybp(HYB_ERR, "zram %p index %d page %p\n",
zram, index, page);
BUG();
}
zram_slot_lock(zram, index);
if (!zram_test_flag(zram, index, ZRAM_CACHED)) {
zram_slot_unlock(zram, index);
prev_page = page;
goto try_again;
}
prev_page = NULL;
zram_clear_flag(zram, index, ZRAM_CACHED);
del_page_from_cache_with_cache(page, cache);
zram_set_flag(zram, index, ZRAM_CACHED_COMPRESS);
zram_slot_unlock(zram, index);
}
return page;
}
int add_anon_page2cache(struct zram * zram, u32 index, struct page *page)
{
struct page *dst_page;
void *src, *dst;
struct mem_cgroup *memcg;
struct cgroup_cache_page *cache;
memcg_hybs_t *hybs;
if (akcompressd_threads == 0)
return 0;
memcg = page->mem_cgroup;
if (!memcg || !MEMCGRP_ITEM_DATA(memcg))
return 0;
hybs = MEMCGRP_ITEM_DATA(memcg);
cache = &hybs->cache;
if (find_and_get_memcg_cache(cache->id) != cache)
return 0;
spin_lock(&cache->lock);
if (cache->dead == 1) {
spin_unlock(&cache->lock);
return 0;
}
spin_unlock(&cache->lock);
dst_page = get_free_page();
if (!dst_page)
return 0;
src = kmap_atomic(page);
dst = kmap_atomic(dst_page);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
kunmap_atomic(dst);
dst_page->mem_cgroup = memcg;
set_page_private(dst_page, mk_page_val(cache->id, index));
update_zram_index(zram, index, (unsigned long)dst_page);
atomic64_inc(&cached_cnt);
wake_all_akcompressd();
hybp(HYB_DEBUG, "add_anon_page2cache index %u page %p passed\n",
index, dst_page);
return 1;
}
static inline void akcompressd_try_to_sleep(wait_queue_head_t *waitq)
{
DEFINE_WAIT(wait);
prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
freezable_schedule();
finish_wait(waitq, &wait);
}
static int akcompressd_func(void *data)
{
struct page *page;
int ret, thread_idx;
struct list_head compress_fail_list;
struct cgroup_cache_page *cache = NULL;
thread_idx = (int)data;
if (thread_idx < 0 || thread_idx >= MAX_AKCOMPRESSD_THREADS) {
hybp(HYB_ERR, "akcompress task idx %d is invalid.\n", thread_idx);
return -EINVAL;
}
set_freezable();
while (!kthread_should_stop()) {
akcompressd_try_to_sleep(&akcompressd_wait);
count_swapd_event(AKCOMPRESSD_WAKEUP);
cache = fetch_one_cache();
if (!cache)
continue;
finish_last_jobs:
INIT_LIST_HEAD(&compress_fail_list);
page = get_anon_page(zram_info, cache);
while (page) {
ret = async_compress_page(zram_info, page);
put_memcg_cache(container_of(cache, memcg_hybs_t, cache));
if (ret)
list_add(&page->lru, &compress_fail_list);
else {
atomic64_inc(&akc_cnt[thread_idx]);
page->mem_cgroup = NULL;
put_free_page(page);
}
page = get_anon_page(zram_info, cache);
}
if (!list_empty(&compress_fail_list))
hybp(HYB_ERR, "have some compress failed pages.\n");
if (kthread_should_stop()) {
if (!can_stop_working(cache, thread_idx))
goto finish_last_jobs;
}
mark_compressing_stop(cache);
}
return 0;
}
static int update_akcompressd_threads(int thread_count, struct zram *zram)
{
int drop, increase;
int last_idx, start_idx, hid;
static DEFINE_MUTEX(update_lock);
if (thread_count < 0 || thread_count > MAX_AKCOMPRESSD_THREADS) {
hybp(HYB_ERR, "thread_count %d is invalid\n", thread_count);
return -EINVAL;
}
mutex_lock(&update_lock);
if (!zram_info || zram_info != zram)
zram_info = zram;
if (thread_count == akcompressd_threads) {
mutex_unlock(&update_lock);
return thread_count;
}
last_idx = akcompressd_threads - 1;
if (thread_count < akcompressd_threads) {
drop = akcompressd_threads - thread_count;
for (hid = last_idx; hid > (last_idx - drop); hid--) {
if (akc_task[hid]) {
kthread_stop(akc_task[hid]);
akc_task[hid] = NULL;
}
}
} else {
increase = thread_count - akcompressd_threads;
start_idx = last_idx + 1;
for (hid = start_idx; hid < (start_idx + increase); hid++) {
if (unlikely(akc_task[hid]))
BUG();
akc_task[hid]= kthread_run(akcompressd_func,
(void*)(unsigned long)hid, "akcompressd:%d", hid);
if (IS_ERR(akc_task[hid])) {
pr_err("Failed to start akcompressd%d\n", hid);
akc_task[hid] = NULL;
break;
}
}
}
hybp(HYB_INFO, "akcompressd_threads count changed, old:%d new:%d\n",
akcompressd_threads, thread_count);
akcompressd_threads = thread_count;
mutex_unlock(&update_lock);
return thread_count;
}
static void wake_all_akcompressd(void)
{
if (atomic64_read(&cached_cnt) < WAKEUP_AKCOMPRESSD_WATERMARK)
return;
if (!waitqueue_active(&akcompressd_wait))
return;
wake_up_interruptible(&akcompressd_wait);
}
int create_akcompressd_task(struct zram *zram)
{
return update_akcompressd_threads(1, zram) != 1;
}
void destroy_akcompressd_task(struct zram *zram)
{
(void)update_akcompressd_threads(0, zram);
}
ssize_t hybridswap_akcompress_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int ret;
unsigned long val;
struct zram *zram = dev_to_zram(dev);
ret = kstrtoul(buf, 0, &val);
if (unlikely(ret)) {
hybp(HYB_ERR, "val is error!\n");
return -EINVAL;
}
ret = update_akcompressd_threads(val, zram);
if (ret < 0) {
hybp(HYB_ERR, "create task failed, val %d\n", val);
return ret;
}
return len;
}
ssize_t hybridswap_akcompress_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int len = 0, id, i;
struct cgroup_cache_page *cache = NULL;
unsigned long cnt = atomic64_read(&cached_cnt);
memcg_hybs_t *hybs;
len += sprintf(buf + len, "akcompressd_threads: %d\n", akcompressd_threads);
len += sprintf(buf + len, "cached page cnt: %lu\n", cnt);
len += sprintf(buf + len, "free page cnt: %u\n", compress_info.free_cnt);
for (i = 0; i < MAX_AKCOMPRESSD_THREADS; i++)
len += sprintf(buf + len, "%-d %-d\n", i, atomic64_read(&akc_cnt[i]));
if (cnt == 0)
return len;
spin_lock(&cached_idr_lock);
idr_for_each_entry(&cached_idr, cache, id) {
hybs = container_of(cache, memcg_hybs_t, cache);
if (cache->cnt == 0)
continue;
len += scnprintf(buf + len, PAGE_SIZE - len, "%s %d\n",
hybs->name, cache->cnt);
if (len >= PAGE_SIZE)
break;
}
spin_unlock(&cached_idr_lock);
return len;
}
void __init akcompressd_pre_init(void)
{
int i;
struct page *page;
mutex_lock(&akcompress_init_lock);
INIT_LIST_HEAD(&compress_info.free_page_head);
spin_lock_init(&compress_info.free_lock);
compress_info.free_cnt = 0;
init_waitqueue_head(&akcompressd_wait);
atomic64_set(&cached_cnt, 0);
for (i = 0; i < MAX_AKCOMPRESSD_THREADS; i++)
atomic64_set(&akc_cnt[i], 0);
for (i = 0; i < DEFAULT_CACHE_COUNT; i ++) {
page = alloc_page(GFP_KERNEL);
if (page) {
list_add_tail(&page->lru, &compress_info.free_page_head);
} else
break;
}
compress_info.free_cnt = i;
mutex_unlock(&akcompress_init_lock);
}
void __exit akcompressd_pre_deinit(void)
{
int i;
struct page *page, *tmp;
mutex_lock(&akcompress_init_lock);
if (list_empty(&compress_info.free_page_head))
goto out;
list_for_each_entry_safe(page, tmp, &compress_info.free_page_head , lru) {
list_del(&page->lru);
free_page(page);
}
out:
compress_info.free_cnt = 0;
mutex_unlock(&akcompress_init_lock);
}
int akcompress_cache_fault_out(struct zram *zram,
struct page *page, u32 index)
{
void *src, *dst;
if (zram_test_flag(zram, index, ZRAM_CACHED)) {
struct page *src_page = (struct page *)zram_get_page(zram, index);
src = kmap_atomic(src_page);
dst = kmap_atomic(page);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
kunmap_atomic(dst);
zram_slot_unlock(zram, index);
hybp(HYB_DEBUG, "read_anon_page_from_cache index %u page %p passed, ZRAM_CACHED\n",
index, src_page);
return 1;
}
if (zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
struct page *src_page = (struct page *)zram_get_page(zram, index);
src = kmap_atomic(src_page);
dst = kmap_atomic(page);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
kunmap_atomic(dst);
zram_slot_unlock(zram, index);
hybp(HYB_DEBUG, "read_anon_page_from_cache index %u page %p passed, ZRAM_CACHED_COMPRESS\n",
index, src_page);
return 1;
}
return 0;
}

View File

@@ -1,699 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zsmalloc.h>
#include <linux/memcontrol.h>
#include "hybridswap_area.h"
#include "hybridswap_list.h"
#include "hybridswap_internal.h"
struct mem_cgroup *get_mem_cgroup(unsigned short mcg_id)
{
struct mem_cgroup *mcg = NULL;
rcu_read_lock();
mcg = mem_cgroup_from_id(mcg_id);
rcu_read_unlock();
return mcg;
}
static bool fragment_dec(bool prev_flag, bool next_flag,
struct hybridswap_stat *stat)
{
if (prev_flag && next_flag) {
atomic64_inc(&stat->frag_cnt);
return false;
}
if (prev_flag || next_flag)
return false;
return true;
}
static bool fragment_inc(bool prev_flag, bool next_flag,
struct hybridswap_stat *stat)
{
if (prev_flag && next_flag) {
atomic64_dec(&stat->frag_cnt);
return false;
}
if (prev_flag || next_flag)
return false;
return true;
}
static bool prev_is_cont(struct hybridswap_area *area, int ext_id, int mcg_id)
{
int prev;
if (is_first_idx(ext_idx(area, ext_id), mcg_idx(area, mcg_id),
area->ext_table))
return false;
prev = prev_idx(ext_idx(area, ext_id), area->ext_table);
return (prev >= 0) && (ext_idx(area, ext_id) == prev + 1);
}
static bool next_is_cont(struct hybridswap_area *area, int ext_id, int mcg_id)
{
int next;
if (is_last_idx(ext_idx(area, ext_id), mcg_idx(area, mcg_id),
area->ext_table))
return false;
next = next_idx(ext_idx(area, ext_id), area->ext_table);
return (next >= 0) && (ext_idx(area, ext_id) + 1 == next);
}
static void ext_fragment_sub(struct hybridswap_area *area, int ext_id)
{
bool prev_flag = false;
bool next_flag = false;
int mcg_id;
struct hybridswap_stat *stat = hybridswap_get_stat_obj();
if (!stat) {
hybp(HYB_ERR, "NULL stat\n");
return;
}
if (!area->ext_table) {
hybp(HYB_ERR, "NULL table\n");
return;
}
if (ext_id < 0 || ext_id >= area->nr_exts) {
hybp(HYB_ERR, "ext = %d invalid\n", ext_id);
return;
}
mcg_id = hyb_list_get_mcgid(ext_idx(area, ext_id), area->ext_table);
if (mcg_id <= 0 || mcg_id >= area->nr_mcgs) {
hybp(HYB_ERR, "mcg_id = %d invalid\n", mcg_id);
return;
}
atomic64_dec(&stat->ext_cnt);
area->mcg_id_cnt[mcg_id]--;
if (area->mcg_id_cnt[mcg_id] == 0) {
atomic64_dec(&stat->mcg_cnt);
atomic64_dec(&stat->frag_cnt);
return;
}
prev_flag = prev_is_cont(area, ext_id, mcg_id);
next_flag = next_is_cont(area, ext_id, mcg_id);
if (fragment_dec(prev_flag, next_flag, stat))
atomic64_dec(&stat->frag_cnt);
}
static void ext_fragment_add(struct hybridswap_area *area, int ext_id)
{
bool prev_flag = false;
bool next_flag = false;
int mcg_id;
struct hybridswap_stat *stat = hybridswap_get_stat_obj();
if (!stat) {
hybp(HYB_ERR, "NULL stat\n");
return;
}
if (!area->ext_table) {
hybp(HYB_ERR, "NULL table\n");
return;
}
if (ext_id < 0 || ext_id >= area->nr_exts) {
hybp(HYB_ERR, "ext = %d invalid\n", ext_id);
return;
}
mcg_id = hyb_list_get_mcgid(ext_idx(area, ext_id), area->ext_table);
if (mcg_id <= 0 || mcg_id >= area->nr_mcgs) {
hybp(HYB_ERR, "mcg_id = %d invalid\n", mcg_id);
return;
}
atomic64_inc(&stat->ext_cnt);
if (area->mcg_id_cnt[mcg_id] == 0) {
area->mcg_id_cnt[mcg_id]++;
atomic64_inc(&stat->frag_cnt);
atomic64_inc(&stat->mcg_cnt);
return;
}
area->mcg_id_cnt[mcg_id]++;
prev_flag = prev_is_cont(area, ext_id, mcg_id);
next_flag = next_is_cont(area, ext_id, mcg_id);
if (fragment_inc(prev_flag, next_flag, stat))
atomic64_inc(&stat->frag_cnt);
}
static int extent_bit2id(struct hybridswap_area *area, int bit)
{
if (bit < 0 || bit >= area->nr_exts) {
hybp(HYB_ERR, "bit = %d invalid\n", bit);
return -EINVAL;
}
return area->nr_exts - bit - 1;
}
static int extent_id2bit(struct hybridswap_area *area, int id)
{
if (id < 0 || id >= area->nr_exts) {
hybp(HYB_ERR, "id = %d invalid\n", id);
return -EINVAL;
}
return area->nr_exts - id - 1;
}
int obj_idx(struct hybridswap_area *area, int idx)
{
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
if (idx < 0 || idx >= area->nr_objs) {
hybp(HYB_ERR, "idx = %d invalid\n", idx);
return -EINVAL;
}
return idx;
}
int ext_idx(struct hybridswap_area *area, int idx)
{
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
if (idx < 0 || idx >= area->nr_exts) {
hybp(HYB_ERR, "idx = %d invalid\n", idx);
return -EINVAL;
}
return idx + area->nr_objs;
}
int mcg_idx(struct hybridswap_area *area, int idx)
{
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
if (idx <= 0 || idx >= area->nr_mcgs) {
hybp(HYB_ERR, "idx = %d invalid, nr_mcgs %d\n", idx,
area->nr_mcgs);
return -EINVAL;
}
return idx + area->nr_objs + area->nr_exts;
}
static struct hyb_list_head *get_obj_table_node(int idx, void *private)
{
struct hybridswap_area *area = private;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return NULL;
}
if (idx < 0) {
hybp(HYB_ERR, "idx = %d invalid\n", idx);
return NULL;
}
if (idx < area->nr_objs)
return &area->lru[idx];
idx -= area->nr_objs;
if (idx < area->nr_exts)
return &area->rmap[idx];
idx -= area->nr_exts;
if (idx > 0 && idx < area->nr_mcgs) {
struct mem_cgroup *mcg = get_mem_cgroup(idx);
if (!mcg)
goto err_out;
return (struct hyb_list_head *)(&MEMCGRP_ITEM(mcg, zram_lru));
}
err_out:
hybp(HYB_ERR, "idx = %d invalid, mcg is NULL\n", idx);
return NULL;
}
static void free_obj_list_table(struct hybridswap_area *area)
{
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return;
}
if (area->lru) {
vfree(area->lru);
area->lru = NULL;
}
if (area->rmap) {
vfree(area->rmap);
area->rmap = NULL;
}
kfree(area->obj_table);
area->obj_table = NULL;
}
static int init_obj_list_table(struct hybridswap_area *area)
{
int i;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
area->lru = vzalloc(sizeof(struct hyb_list_head) * area->nr_objs);
if (!area->lru) {
hybp(HYB_ERR, "area->lru alloc failed\n");
goto err_out;
}
area->rmap = vzalloc(sizeof(struct hyb_list_head) * area->nr_exts);
if (!area->rmap) {
hybp(HYB_ERR, "area->rmap alloc failed\n");
goto err_out;
}
area->obj_table = alloc_table(get_obj_table_node, area, GFP_KERNEL);
if (!area->obj_table) {
hybp(HYB_ERR, "area->obj_table alloc failed\n");
goto err_out;
}
for (i = 0; i < area->nr_objs; i++)
hyb_list_init(obj_idx(area, i), area->obj_table);
for (i = 0; i < area->nr_exts; i++)
hyb_list_init(ext_idx(area, i), area->obj_table);
hybp(HYB_INFO, "hybridswap obj list table init OK.\n");
return 0;
err_out:
free_obj_list_table(area);
hybp(HYB_ERR, "hybridswap obj list table init failed.\n");
return -ENOMEM;
}
static struct hyb_list_head *get_ext_table_node(int idx, void *private)
{
struct hybridswap_area *area = private;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return NULL;
}
if (idx < area->nr_objs)
goto err_out;
idx -= area->nr_objs;
if (idx < area->nr_exts)
return &area->ext[idx];
idx -= area->nr_exts;
if (idx > 0 && idx < area->nr_mcgs) {
struct mem_cgroup *mcg = get_mem_cgroup(idx);
if (!mcg)
return NULL;
return (struct hyb_list_head *)(&MEMCGRP_ITEM(mcg, ext_lru));
}
err_out:
hybp(HYB_ERR, "idx = %d invalid\n", idx);
return NULL;
}
static void free_ext_list_table(struct hybridswap_area *area)
{
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return;
}
if (area->ext)
vfree(area->ext);
kfree(area->ext_table);
}
static int init_ext_list_table(struct hybridswap_area *area)
{
int i;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
area->ext = vzalloc(sizeof(struct hyb_list_head) * area->nr_exts);
if (!area->ext)
goto err_out;
area->ext_table = alloc_table(get_ext_table_node, area, GFP_KERNEL);
if (!area->ext_table)
goto err_out;
for (i = 0; i < area->nr_exts; i++)
hyb_list_init(ext_idx(area, i), area->ext_table);
hybp(HYB_INFO, "hybridswap ext list table init OK.\n");
return 0;
err_out:
free_ext_list_table(area);
hybp(HYB_ERR, "hybridswap ext list table init failed.\n");
return -ENOMEM;
}
void free_hybridswap_area(struct hybridswap_area *area)
{
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return;
}
vfree(area->bitmap);
vfree(area->ext_stored_pages);
free_obj_list_table(area);
free_ext_list_table(area);
vfree(area);
}
struct hybridswap_area *alloc_hybridswap_area(unsigned long ori_size,
unsigned long comp_size)
{
struct hybridswap_area *area = vzalloc(sizeof(struct hybridswap_area));
if (!area) {
hybp(HYB_ERR, "area alloc failed\n");
goto err_out;
}
if (comp_size & (EXTENT_SIZE - 1)) {
hybp(HYB_ERR, "disksize = %ld align invalid (32K align needed)\n",
comp_size);
goto err_out;
}
area->size = comp_size;
area->nr_exts = comp_size >> EXTENT_SHIFT;
area->nr_mcgs = MEM_CGROUP_ID_MAX;
area->nr_objs = ori_size >> PAGE_SHIFT;
area->bitmap = vzalloc(BITS_TO_LONGS(area->nr_exts) * sizeof(long));
if (!area->bitmap) {
hybp(HYB_ERR, "area->bitmap alloc failed, %lu\n",
BITS_TO_LONGS(area->nr_exts) * sizeof(long));
goto err_out;
}
area->ext_stored_pages = vzalloc(sizeof(atomic_t) * area->nr_exts);
if (!area->ext_stored_pages) {
hybp(HYB_ERR, "area->ext_stored_pages alloc failed\n");
goto err_out;
}
if (init_obj_list_table(area)) {
hybp(HYB_ERR, "init obj list table failed\n");
goto err_out;
}
if (init_ext_list_table(area)) {
hybp(HYB_ERR, "init ext list table failed\n");
goto err_out;
}
hybp(HYB_INFO, "area %p size %lu nr_exts %lu nr_mcgs %lu nr_objs %lu\n",
area, area->size, area->nr_exts, area->nr_mcgs,
area->nr_objs);
hybp(HYB_INFO, "hybridswap_area init OK.\n");
return area;
err_out:
free_hybridswap_area(area);
hybp(HYB_ERR, "hybridswap_area init failed.\n");
return NULL;
}
void hybridswap_check_area_extent(struct hybridswap_area *area)
{
int i;
if (!area)
return;
for (i = 0; i < area->nr_exts; i++) {
int cnt = atomic_read(&area->ext_stored_pages[i]);
int ext_id = ext_idx(area, i);
bool priv = hyb_list_test_priv(ext_id, area->ext_table);
int mcg_id = hyb_list_get_mcgid(ext_id, area->ext_table);
if (cnt < 0 || (cnt > 0 && mcg_id == 0))
hybp(HYB_ERR, "%8d %8d %8d %8d %4d\n", i, cnt, ext_id,
mcg_id, priv);
}
}
void hybridswap_free_extent(struct hybridswap_area *area, int ext_id)
{
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return;
}
if (ext_id < 0 || ext_id >= area->nr_exts) {
hybp(HYB_ERR, "INVALID ext %d\n", ext_id);
return;
}
hybp(HYB_DEBUG, "free ext id = %d.\n", ext_id);
hyb_list_set_mcgid(ext_idx(area, ext_id), area->ext_table, 0);
if (!test_and_clear_bit(extent_id2bit(area, ext_id), area->bitmap)) {
hybp(HYB_ERR, "bit not set, ext = %d\n", ext_id);
WARN_ON_ONCE(1);
}
atomic_dec(&area->stored_exts);
}
static int alloc_bitmap(unsigned long *bitmap, int max, int last_bit)
{
int bit;
if (!bitmap) {
hybp(HYB_ERR, "NULL bitmap.\n");
return -EINVAL;
}
retry:
bit = find_next_zero_bit(bitmap, max, last_bit);
if (bit == max) {
if (last_bit == 0) {
hybp(HYB_ERR, "alloc bitmap failed.\n");
return -ENOSPC;
}
last_bit = 0;
goto retry;
}
if (test_and_set_bit(bit, bitmap))
goto retry;
return bit;
}
int hybridswap_alloc_extent(struct hybridswap_area *area, struct mem_cgroup *mcg)
{
int last_bit;
int bit;
int ext_id;
int mcg_id;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
if (!mcg) {
hybp(HYB_ERR, "NULL mcg\n");
return -EINVAL;
}
last_bit = atomic_read(&area->last_alloc_bit);
hybp(HYB_DEBUG, "last_bit = %d.\n", last_bit);
bit = alloc_bitmap(area->bitmap, area->nr_exts, last_bit);
if (bit < 0) {
hybp(HYB_ERR, "alloc bitmap failed.\n");
return bit;
}
ext_id = extent_bit2id(area, bit);
mcg_id = hyb_list_get_mcgid(ext_idx(area, ext_id), area->ext_table);
if (mcg_id) {
hybp(HYB_ERR, "already has mcg %d, ext %d\n",
mcg_id, ext_id);
goto err_out;
}
hyb_list_set_mcgid(ext_idx(area, ext_id), area->ext_table, mcg->id.id);
atomic_set(&area->last_alloc_bit, bit);
atomic_inc(&area->stored_exts);
hybp(HYB_DEBUG, "extent %d init OK.\n", ext_id);
hybp(HYB_DEBUG, "mcg_id = %d, ext id = %d\n", mcg->id.id, ext_id);
return ext_id;
err_out:
clear_bit(bit, area->bitmap);
WARN_ON_ONCE(1);
return -EBUSY;
}
int get_extent(struct hybridswap_area *area, int ext_id)
{
int mcg_id;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
if (ext_id < 0 || ext_id >= area->nr_exts) {
hybp(HYB_ERR, "ext = %d invalid\n", ext_id);
return -EINVAL;
}
if (!hyb_list_clear_priv(ext_idx(area, ext_id), area->ext_table))
return -EBUSY;
mcg_id = hyb_list_get_mcgid(ext_idx(area, ext_id), area->ext_table);
if (mcg_id) {
ext_fragment_sub(area, ext_id);
hyb_list_del(ext_idx(area, ext_id), mcg_idx(area, mcg_id),
area->ext_table);
}
hybp(HYB_DEBUG, "ext id = %d\n", ext_id);
return ext_id;
}
void put_extent(struct hybridswap_area *area, int ext_id)
{
int mcg_id;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return;
}
if (ext_id < 0 || ext_id >= area->nr_exts) {
hybp(HYB_ERR, "ext = %d invalid\n", ext_id);
return;
}
mcg_id = hyb_list_get_mcgid(ext_idx(area, ext_id), area->ext_table);
if (mcg_id) {
hh_lock_list(mcg_idx(area, mcg_id), area->ext_table);
hyb_list_add_nolock(ext_idx(area, ext_id), mcg_idx(area, mcg_id),
area->ext_table);
ext_fragment_add(area, ext_id);
hh_unlock_list(mcg_idx(area, mcg_id), area->ext_table);
}
if (!hyb_list_set_priv(ext_idx(area, ext_id), area->ext_table)) {
hybp(HYB_ERR, "private not set, ext = %d\n", ext_id);
WARN_ON_ONCE(1);
return;
}
hybp(HYB_DEBUG, "put extent %d.\n", ext_id);
}
/*
 * Pop one idle (private) extent belonging to @mcg, detaching it from
 * the memcg's extent list. Returns the extent id, -ENOENT when none is
 * available, or -EINVAL on bad arguments.
 */
int get_memcg_extent(struct hybridswap_area *area, struct mem_cgroup *mcg)
{
int mcg_id;
int ext_id = -ENOENT;
int idx;
if (!area) {
hybp(HYB_ERR, "NULL area\n");
return -EINVAL;
}
if (!area->ext_table) {
hybp(HYB_ERR, "NULL table\n");
return -EINVAL;
}
if (!mcg) {
hybp(HYB_ERR, "NULL mcg\n");
return -EINVAL;
}
mcg_id = mcg->id.id;
hh_lock_list(mcg_idx(area, mcg_id), area->ext_table);
/* Scan the memcg's extents for one whose private bit we can claim. */
hyb_list_for_each_entry(idx, mcg_idx(area, mcg_id), area->ext_table)
if (hyb_list_clear_priv(idx, area->ext_table)) {
/* Table index -> extent id (extent entries sit after the objs). */
ext_id = idx - area->nr_objs;
break;
}
if (ext_id >= 0 && ext_id < area->nr_exts) {
ext_fragment_sub(area, ext_id);
/* idx still points at the entry claimed in the loop above. */
hyb_list_del_nolock(idx, mcg_idx(area, mcg_id), area->ext_table);
hybp(HYB_DEBUG, "ext id = %d\n", ext_id);
}
hh_unlock_list(mcg_idx(area, mcg_id), area->ext_table);
return ext_id;
}
/*
 * Peek the first zram object index linked on @mcg's object list.
 * Returns that index, -ENOENT when the list is empty, or -EINVAL on
 * bad arguments.
 */
int get_memcg_zram_entry(struct hybridswap_area *area, struct mem_cgroup *mcg)
{
	int head = -ENOENT;
	int pos, owner;

	if (!area) {
		hybp(HYB_ERR, "NULL area\n");
		return -EINVAL;
	}
	if (!area->obj_table) {
		hybp(HYB_ERR, "NULL table\n");
		return -EINVAL;
	}
	if (!mcg) {
		hybp(HYB_ERR, "NULL mcg\n");
		return -EINVAL;
	}

	owner = mcg->id.id;
	hh_lock_list(mcg_idx(area, owner), area->obj_table);
	/* Only the head of the list is needed; stop after one entry. */
	hyb_list_for_each_entry(pos, mcg_idx(area, owner), area->obj_table) {
		head = pos;
		break;
	}
	hh_unlock_list(mcg_idx(area, owner), area->obj_table);

	return head;
}
/*
 * Peek the first zram object index linked on extent @ext_id's object
 * list. Returns that index, -ENOENT when the list is empty, or -EINVAL
 * on bad arguments.
 */
int get_extent_zram_entry(struct hybridswap_area *area, int ext_id)
{
	int head = -ENOENT;
	int pos;

	if (!area) {
		hybp(HYB_ERR, "NULL area\n");
		return -EINVAL;
	}
	if (!area->obj_table) {
		hybp(HYB_ERR, "NULL table\n");
		return -EINVAL;
	}
	if (ext_id < 0 || ext_id >= area->nr_exts) {
		hybp(HYB_ERR, "ext = %d invalid\n", ext_id);
		return -EINVAL;
	}

	hh_lock_list(ext_idx(area, ext_id), area->obj_table);
	/* Only the head of the list is needed; stop after one entry. */
	hyb_list_for_each_entry(pos, ext_idx(area, ext_id), area->obj_table) {
		head = pos;
		break;
	}
	hh_unlock_list(ext_idx(area, ext_id), area->obj_table);

	return head;
}

View File

@@ -1,50 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#ifndef _HYBRIDSWAP_AREA_H
#define _HYBRIDSWAP_AREA_H
#include <linux/memcontrol.h>
/* Bookkeeping for one hybridswap backing area (zram objects + extents). */
struct hybridswap_area {
unsigned long size; /* backing-store size (units per alloc_hybridswap_area) */
int nr_objs; /* object entries; extent table indices are offset by this */
int nr_exts; /* number of extents in the backing store */
int nr_mcgs; /* number of memcg list slots */
unsigned long *bitmap; /* extent allocation bitmap */
atomic_t last_alloc_bit; /* last allocated bitmap bit (search hint) */
struct hyb_list_table *ext_table; /* linked-list state for extents */
struct hyb_list_head *ext;
struct hyb_list_table *obj_table; /* linked-list state for zram objects */
struct hyb_list_head *rmap;
struct hyb_list_head *lru;
atomic_t stored_exts; /* extents currently allocated (see alloc/free) */
atomic_t *ext_stored_pages;
unsigned int mcg_id_cnt[MEM_CGROUP_ID_MAX + 1];
};
struct mem_cgroup *get_mem_cgroup(unsigned short mcg_id);
int obj_idx(struct hybridswap_area *area, int idx);
int ext_idx(struct hybridswap_area *area, int idx);
int mcg_idx(struct hybridswap_area *area, int idx);
void free_hybridswap_area(struct hybridswap_area *area);
struct hybridswap_area *alloc_hybridswap_area(unsigned long ori_size,
unsigned long comp_size);
void hybridswap_check_area_extent(struct hybridswap_area *area);
void hybridswap_free_extent(struct hybridswap_area *area, int ext_id);
int hybridswap_alloc_extent(struct hybridswap_area *area, struct mem_cgroup *mcg);
int get_extent(struct hybridswap_area *area, int ext_id);
void put_extent(struct hybridswap_area *area, int ext_id);
int get_memcg_extent(struct hybridswap_area *area, struct mem_cgroup *mcg);
int get_memcg_zram_entry(struct hybridswap_area *area, struct mem_cgroup *mcg);
int get_extent_zram_entry(struct hybridswap_area *area, int ext_id);
#endif /* _HYBRIDSWAP_AREA_H */

File diff suppressed because it is too large Load Diff

View File

@@ -1,678 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/memcontrol.h>
#include <linux/swap.h>
#include "../zram_drv.h"
#include "../zram_drv_internal.h"
#include "hybridswap_internal.h"
#include "hybridswap.h"
#define PRE_EOL_INFO_OVER_VAL 2
#define LIFE_TIME_EST_OVER_VAL 8
#define DEFAULT_STORED_WM_RATIO 90
/* Allocation hints passed to hybridswap_alloc_page_common(). */
struct zs_ext_para {
struct hybridswap_page_pool *pool; /* optional recycled-page pool */
size_t alloc_size;
bool fast; /* try a non-sleeping (GFP_ATOMIC) allocation first */
bool nofail; /* fall back to GFP_NOIO rather than fail */
};
/* Global runtime state of the hybridswap core. */
struct hybridswap_cfg {
atomic_t enable;
atomic_t reclaim_in_enable;
struct hybridswap_stat *stat; /* NULL until global-setting init */
struct workqueue_struct *reclaim_wq;
struct zram *zram;
atomic_t dev_life; /* end-of-life mode: daily quota cut to 10% */
unsigned long quota_day; /* daily reclaim-in byte quota */
struct timer_list lpc_timer; /* life-protect timer, re-arms itself */
struct work_struct lpc_work; /* resets the daily byte counter */
};
struct hybridswap_cfg global_settings;
#define DEVICE_NAME_LEN 64
static char loop_device[DEVICE_NAME_LEN];
/*
 * Zeroed allocation with a two-step policy: when @fast, first try a
 * non-sleeping GFP_ATOMIC allocation and fall back to GFP_NOIO only if
 * that fails and @nofail demands a result. Non-fast callers go straight
 * to GFP_NOIO. May return NULL unless the fallback succeeds.
 */
void *hybridswap_malloc(size_t size, bool fast, bool nofail)
{
	if (likely(fast)) {
		void *ptr = kzalloc(size, GFP_ATOMIC);

		/* Done unless the atomic try failed AND failure is forbidden. */
		if (likely(ptr || !nofail))
			return ptr;
	}

	return kzalloc(size, GFP_NOIO);
}
/* Release memory obtained from hybridswap_malloc(); NULL is a no-op. */
void hybridswap_free(const void *mem)
{
kfree(mem);
}
/*
 * Get one page for extent I/O: prefer a recycled page from the attached
 * page pool, otherwise fall back to the page allocator honouring the
 * fast / nofail hints carried in @data (a struct zs_ext_para).
 */
struct page *hybridswap_alloc_page_common(void *data, gfp_t gfp)
{
	struct zs_ext_para *para = (struct zs_ext_para *)data;
	struct page *page = NULL;

	/* First choice: a page queued on the recycle pool. */
	if (para->pool) {
		spin_lock(&para->pool->page_pool_lock);
		if (!list_empty(&para->pool->page_pool_list)) {
			page = list_first_entry(&para->pool->page_pool_list,
						struct page, lru);
			list_del(&page->lru);
		}
		spin_unlock(&para->pool->page_pool_lock);
	}
	if (page)
		return page;

	/* Fast path: a non-sleeping attempt. */
	if (para->fast) {
		page = alloc_page(GFP_ATOMIC);
		if (likely(page))
			return page;
	}

	/* Must-not-fail callers avoid I/O recursion via GFP_NOIO. */
	return alloc_page(para->nofail ? GFP_NOIO : gfp);
}
/*
 * Allocate @size bytes from @zs_pool for hybridswap storage.
 * NOTE(review): the @pool argument is accepted but unused here —
 * presumably kept for interface parity with other allocators; confirm
 * before removing.
 */
unsigned long hybridswap_zsmalloc(struct zs_pool *zs_pool,
size_t size, struct hybridswap_page_pool *pool)
{
/* Movable, highmem-capable allocation that may reclaim but won't warn. */
gfp_t gfp = __GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM |
__GFP_NOWARN | __GFP_HIGHMEM | __GFP_MOVABLE;
return zs_malloc(zs_pool, size, gfp);
}
/* Thin pass-through to zs_malloc() with the caller-supplied gfp mask. */
unsigned long zram_zsmalloc(struct zs_pool *zs_pool, size_t size, gfp_t gfp)
{
return zs_malloc(zs_pool, size, gfp);
}
/*
 * Convenience wrapper: build the zs_ext_para descriptor on the stack
 * and delegate to hybridswap_alloc_page_common().
 */
struct page *hybridswap_alloc_page(struct hybridswap_page_pool *pool,
	gfp_t gfp, bool fast, bool nofail)
{
	struct zs_ext_para para = {
		.pool = pool,
		.fast = fast,
		.nofail = nofail,
	};

	return hybridswap_alloc_page_common((void *)&para, gfp);
}
/*
 * Give @page back: stash it on @pool for reuse when a pool exists,
 * otherwise return it to the page allocator.
 */
void hybridswap_page_recycle(struct page *page, struct hybridswap_page_pool *pool)
{
	if (!pool) {
		__free_page(page);
		return;
	}

	spin_lock(&pool->page_pool_lock);
	list_add(&page->lru, &pool->page_pool_list);
	spin_unlock(&pool->page_pool_lock);
}
/* True when reclaim-in (writeback to the backing store) is allowed. */
bool hybridswap_reclaim_in_enable(void)
{
return !!atomic_read(&global_settings.reclaim_in_enable);
}
/* Forbid reclaim-in without touching the core enable flag. */
void hybridswap_set_reclaim_in_disable(void)
{
atomic_set(&global_settings.reclaim_in_enable, false);
}
/* Allow or forbid reclaim-in. */
void hybridswap_set_reclaim_in_enable(bool en)
{
atomic_set(&global_settings.reclaim_in_enable, en ? 1 : 0);
}
/* True when the hybridswap core has been enabled. */
bool hybridswap_core_enabled(void)
{
return !!atomic_read(&global_settings.enable);
}
/* Update the reclaim-in flag and the core enable flag together.
 * NOTE(review): the core flag is only written while currently disabled,
 * so once set it is never cleared — even via hybridswap_core_disable().
 * Confirm whether this one-way latch is intended. */
void hybridswap_set_enable(bool en)
{
hybridswap_set_reclaim_in_enable(en);
if (!hybridswap_core_enabled())
atomic_set(&global_settings.enable, en ? 1 : 0);
}
/* Accessor for the global statistics block (NULL before init). */
struct hybridswap_stat *hybridswap_get_stat_obj(void)
{
return global_settings.stat;
}
/* True when device end-of-life protection mode is set. */
bool hybridswap_dev_life(void)
{
return !!atomic_read(&global_settings.dev_life);
}
/* Toggle device end-of-life protection mode. */
void hybridswap_set_dev_life(bool en)
{
atomic_set(&global_settings.dev_life, en ? 1 : 0);
}
/* Current daily reclaim-in byte quota. */
unsigned long hybridswap_quota_day(void)
{
return global_settings.quota_day;
}
/* Update the daily reclaim-in byte quota. */
void hybridswap_set_quota_day(unsigned long val)
{
global_settings.quota_day = val;
}
bool hybridswap_reach_life_protect(void)
{
struct hybridswap_stat *stat = hybridswap_get_stat_obj();
unsigned long quota = hybridswap_quota_day();
if (hybridswap_dev_life())
quota /= 10;
return atomic64_read(&stat->reclaimin_bytes_daily) > quota;
}
/* Daily housekeeping: reset the per-day reclaim byte counter.
 * NOTE(review): the counter is only cleared when local time is past
 * 03:00 (tm_hour > 2) — presumably to align the reset with a nightly
 * window; confirm. */
static void hybridswap_life_protect_ctrl_work(struct work_struct *work)
{
struct tm tm;
struct timespec64 ts;
struct hybridswap_stat *stat = hybridswap_get_stat_obj();
ktime_get_real_ts64(&ts);
/* Convert to local wall-clock time using the system timezone. */
time64_to_tm(ts.tv_sec - sys_tz.tz_minuteswest * 60, 0, &tm);
if (tm.tm_hour > 2)
atomic64_set(&stat->reclaimin_bytes_daily, 0);
}
/* Timer callback: kick the reset work and re-arm for the next period. */
static void hybridswap_life_protect_ctrl_timer(struct timer_list *t)
{
schedule_work(&global_settings.lpc_work);
mod_timer(&global_settings.lpc_timer,
jiffies + HYBRIDSWAP_CHECK_INTERVAL * HZ);
}
/*
 * Undo hybridswap_open_bdev()/blkdev_get(): drop the exclusive block
 * device reference and close the backing file. Either argument may be
 * NULL.
 */
void hybridswap_close_bdev(struct block_device *bdev, struct file *backing_dev)
{
	if (bdev != NULL)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev != NULL)
		filp_close(backing_dev, NULL);
}
/*
 * Open @file_name for use as the hybridswap backing store and verify it
 * is a block device. Returns the opened struct file, or NULL on any
 * failure (the error is logged).
 *
 * Fixes: PTR_ERR() yields a long, so log it with %ld (was %lld);
 * dropped a dead store to the local before returning NULL.
 */
struct file *hybridswap_open_bdev(const char *file_name)
{
	struct file *backing_dev;

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (unlikely(IS_ERR(backing_dev))) {
		hybp(HYB_ERR, "open the %s failed! eno = %ld\n",
			file_name, PTR_ERR(backing_dev));
		return NULL;
	}
	if (unlikely(!S_ISBLK(backing_dev->f_mapping->host->i_mode))) {
		hybp(HYB_ERR, "%s isn't a blk device\n", file_name);
		hybridswap_close_bdev(NULL, backing_dev);
		return NULL;
	}
	return backing_dev;
}
/*
 * Attach the block device at @file_name as zram's hybridswap backing
 * store: open it, take an exclusive blkdev reference, set its block
 * size to PAGE_SIZE and record its page count on @zram.
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on failure.
 */
int hybridswap_bind(struct zram *zram, const char *file_name)
{
struct file *backing_dev = NULL;
struct inode *inode = NULL;
unsigned long nr_pages;
struct block_device *bdev = NULL;
int err;
backing_dev = hybridswap_open_bdev(file_name);
if (unlikely(!backing_dev))
return -EINVAL;
inode = backing_dev->f_mapping->host;
/* Exclusive open keyed on @zram so no one else claims the device. */
bdev = blkdev_get_by_dev(inode->i_rdev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
if (IS_ERR(bdev)) {
hybp(HYB_ERR, "%s blkdev_get failed!\n", file_name);
err = PTR_ERR(bdev);
/* NULL so the cleanup path only closes the backing file. */
bdev = NULL;
goto out;
}
nr_pages = (unsigned long)i_size_read(inode) >> PAGE_SHIFT;
err = set_blocksize(bdev, PAGE_SIZE);
if (unlikely(err)) {
hybp(HYB_ERR,
"%s set blocksize failed! eno = %d\n", file_name, err);
goto out;
}
zram->bdev = bdev;
zram->backing_dev = backing_dev;
zram->nr_pages = nr_pages;
return 0;
out:
hybridswap_close_bdev(bdev, backing_dev);
return err;
}
static inline unsigned long get_original_used_swap(void)
{
struct sysinfo val;
si_swapinfo(&val);
return val.totalswap - val.freeswap;
}
/*
 * Reset every counter in @stat to its baseline. Most counters start at
 * zero; used_swap_pages snapshots the current system-wide swap usage
 * and stored_wm_ratio starts at its default.
 */
void hybridswap_stat_init(struct hybridswap_stat *stat)
{
int i;
atomic64_set(&stat->reclaimin_cnt, 0);
atomic64_set(&stat->reclaimin_bytes, 0);
atomic64_set(&stat->reclaimin_real_load, 0);
atomic64_set(&stat->dropped_ext_size, 0);
atomic64_set(&stat->reclaimin_bytes_daily, 0);
atomic64_set(&stat->reclaimin_pages, 0);
atomic64_set(&stat->reclaimin_infight, 0);
atomic64_set(&stat->batchout_cnt, 0);
atomic64_set(&stat->batchout_bytes, 0);
atomic64_set(&stat->batchout_real_load, 0);
atomic64_set(&stat->batchout_pages, 0);
atomic64_set(&stat->batchout_inflight, 0);
atomic64_set(&stat->fault_cnt, 0);
atomic64_set(&stat->hybridswap_fault_cnt, 0);
atomic64_set(&stat->reout_pages, 0);
atomic64_set(&stat->reout_bytes, 0);
atomic64_set(&stat->zram_stored_pages, 0);
atomic64_set(&stat->zram_stored_size, 0);
atomic64_set(&stat->stored_pages, 0);
atomic64_set(&stat->stored_size, 0);
atomic64_set(&stat->notify_free, 0);
atomic64_set(&stat->frag_cnt, 0);
atomic64_set(&stat->mcg_cnt, 0);
atomic64_set(&stat->ext_cnt, 0);
atomic64_set(&stat->miss_free, 0);
atomic64_set(&stat->mcgid_clear, 0);
atomic64_set(&stat->skip_track_cnt, 0);
atomic64_set(&stat->null_memcg_skip_track_cnt, 0);
/* Non-zero baselines: current swap usage and the default watermark. */
atomic64_set(&stat->used_swap_pages, get_original_used_swap());
atomic64_set(&stat->stored_wm_ratio, DEFAULT_STORED_WM_RATIO);
/* Per-scenario failure counters and latency accumulators. */
for (i = 0; i < HYBRIDSWAP_SCENARIO_BUTT; ++i) {
atomic64_set(&stat->io_fail_cnt[i], 0);
atomic64_set(&stat->alloc_fail_cnt[i], 0);
atomic64_set(&stat->lat[i].total_lat, 0);
atomic64_set(&stat->lat[i].max_lat, 0);
}
stat->record.num = 0;
spin_lock_init(&stat->record.lock);
}
/*
 * One-time set-up of the global stat block, the reclaim workqueue and
 * the daily life-protect timer. Returns true on success; false when
 * already initialised or on allocation failure (partially created
 * resources are rolled back).
 */
static bool hybridswap_global_setting_init(struct zram *zram)
{
/* A non-NULL stat block means we have already been here. */
if (unlikely(global_settings.stat))
return false;
global_settings.zram = zram;
hybridswap_set_enable(false);
global_settings.stat = hybridswap_malloc(
sizeof(struct hybridswap_stat), false, true);
if (unlikely(!global_settings.stat)) {
hybp(HYB_ERR, "global stat allocation failed!\n");
return false;
}
hybridswap_stat_init(global_settings.stat);
global_settings.reclaim_wq = alloc_workqueue("hybridswap_reclaim",
WQ_CPU_INTENSIVE, 0);
if (unlikely(!global_settings.reclaim_wq)) {
hybp(HYB_ERR, "reclaim workqueue allocation failed!\n");
hybridswap_free(global_settings.stat);
global_settings.stat = NULL;
return false;
}
global_settings.quota_day = HYBRIDSWAP_QUOTA_DAY;
/* Arm the self-rearming daily life-protect check. */
INIT_WORK(&global_settings.lpc_work, hybridswap_life_protect_ctrl_work);
global_settings.lpc_timer.expires = jiffies + HYBRIDSWAP_CHECK_INTERVAL * HZ;
timer_setup(&global_settings.lpc_timer, hybridswap_life_protect_ctrl_timer, 0);
add_timer(&global_settings.lpc_timer);
hybp(HYB_DEBUG, "global settings init success\n");
return true;
}
/*
 * Tear down the global state created by hybridswap_global_setting_init().
 *
 * Fix: the self-rearming life-protect timer and its work item were left
 * running, so they could fire after @stat was freed (use-after-free in
 * hybridswap_life_protect_ctrl_work). Stop both before releasing
 * resources.
 */
void hybridswap_global_setting_deinit(void)
{
	del_timer_sync(&global_settings.lpc_timer);
	cancel_work_sync(&global_settings.lpc_work);
	destroy_workqueue(global_settings.reclaim_wq);
	hybridswap_free(global_settings.stat);
	global_settings.stat = NULL;
	global_settings.zram = NULL;
	global_settings.reclaim_wq = NULL;
}
/* Accessor for the dedicated reclaim workqueue (NULL before init). */
struct workqueue_struct *hybridswap_get_reclaim_workqueue(void)
{
return global_settings.reclaim_wq;
}
/*
 * One-time core bring-up: requires loop_device to have been configured,
 * initialises the global settings and binds the backing device.
 * Returns 0 on success or a negative errno.
 *
 * Fix: when hybridswap_bind() failed, the error was logged and global
 * state rolled back, but the function still returned 0 — propagate the
 * error instead.
 */
static int hybridswap_core_init(struct zram *zram)
{
	int ret;

	if (loop_device[0] == '\0') {
		hybp(HYB_ERR, "please setting loop_device first\n");
		return -EINVAL;
	}
	if (!hybridswap_global_setting_init(zram))
		return -EINVAL;

	ret = hybridswap_bind(zram, loop_device);
	if (unlikely(ret)) {
		hybp(HYB_ERR, "bind storage device failed! %d\n", ret);
		hybridswap_global_setting_deinit();
		return ret;
	}
	return 0;
}
/*
 * Lazily bring up the manager and scheduler the first time hybridswap
 * is enabled; a no-op when already enabled or when disabling.
 * Returns 0 on success, -EINVAL on failure.
 */
int hybridswap_set_enable_init(bool en)
{
int ret;
/* Nothing to do when already on, or when turning off. */
if (hybridswap_core_enabled() || !en)
return 0;
if (!global_settings.stat) {
hybp(HYB_ERR, "global_settings.stat is null!\n");
return -EINVAL;
}
ret = hybridswap_manager_init(global_settings.zram);
if (unlikely(ret)) {
hybp(HYB_ERR, "init manager failed! %d\n", ret);
return -EINVAL;
}
ret = hybridswap_schedule_init();
if (unlikely(ret)) {
hybp(HYB_ERR, "init schedule failed! %d\n", ret);
/* Roll back the manager when the scheduler fails. */
hybridswap_manager_deinit(global_settings.zram);
return -EINVAL;
}
return 0;
}
/*
 * sysfs write handler for the enable knob: any non-zero value enables
 * (initialising subsystems on first enable), zero disables.
 */
ssize_t hybridswap_core_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long val;

	if (unlikely(kstrtoul(buf, 0, &val))) {
		hybp(HYB_ERR, "val is error!\n");
		return -EINVAL;
	}

	if (hybridswap_set_enable_init(val != 0))
		return -EINVAL;

	hybridswap_set_enable(val != 0);
	return len;
}
/* sysfs read handler: report the core and reclaim-in enable states. */
ssize_t hybridswap_core_enable_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	const char *core = hybridswap_core_enabled() ? "enable" : "disable";
	const char *rin = hybridswap_reclaim_in_enable() ? "enable" : "disable";

	return snprintf(buf, PAGE_SIZE, "hybridswap %s reclaim_in %s\n",
			core, rin);
}
/*
 * sysfs write handler: record the backing loop device path and, if the
 * zram disk is already sized, initialise the hybridswap core on it.
 * Returns @len even when core init fails (error is only logged).
 *
 * Fix: @len is a size_t — log it with %zu instead of %d.
 */
ssize_t hybridswap_loop_device_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram;
	int ret = 0;

	if (len > (DEVICE_NAME_LEN - 1)) {
		hybp(HYB_ERR, "buf %s len %zu is too long\n", buf, len);
		return -EINVAL;
	}
	memcpy(loop_device, buf, len);
	loop_device[len] = '\0';
	/* NOTE(review): return value ignored, so leading whitespace is NOT
	 * stripped (strstrip trims only the tail in place) — confirm intent. */
	strstrip(loop_device);
	zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (zram->disksize == 0) {
		hybp(HYB_ERR, "disksize is 0\n");
		goto out;
	}
	ret = hybridswap_core_init(zram);
	if (ret)
		hybp(HYB_ERR, "hybridswap_core_init init failed\n");
out:
	up_write(&zram->init_lock);
	return len;
}
/* sysfs read handler: report the configured backing device path. */
ssize_t hybridswap_loop_device_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", loop_device);
}
/* sysfs write handler: toggle device end-of-life protection mode. */
ssize_t hybridswap_dev_life_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long val;

	if (unlikely(kstrtoul(buf, 0, &val))) {
		hybp(HYB_ERR, "val is error!\n");
		return -EINVAL;
	}

	hybridswap_set_dev_life(val != 0);
	return len;
}
/* sysfs read handler: report end-of-life protection mode. */
ssize_t hybridswap_dev_life_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	const char *state = hybridswap_dev_life() ? "enable" : "disable";

	return sprintf(buf, "%s\n", state);
}
/* sysfs write handler: set the daily reclaim-in byte quota. */
ssize_t hybridswap_quota_day_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long val;

	if (unlikely(kstrtoul(buf, 0, &val))) {
		hybp(HYB_ERR, "val is error!\n");
		return -EINVAL;
	}

	hybridswap_set_quota_day(val);
	return len;
}
/*
 * sysfs read handler for the daily quota.
 *
 * Fix: hybridswap_quota_day() returns unsigned long — print with %lu,
 * not %llu (type/format mismatch flagged by -Wformat).
 */
ssize_t hybridswap_quota_day_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", hybridswap_quota_day());
}
/*
 * sysfs write handler: record how much zram may grow. The user value is
 * stored shifted left by 8, i.e. in units of 256 pages.
 */
ssize_t hybridswap_zram_increase_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long val;
	char *trimmed = strstrip((char *)buf);

	if (kstrtoul(trimmed, 0, &val))
		return -EINVAL;

	zram->increase_nr_pages = (val << 8);
	return len;
}
/* sysfs read handler: report the configured increase (>> 8 undoes store). */
ssize_t hybridswap_zram_increase_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE,
		"%lu\n", zram->increase_nr_pages >> 8);
}
/* cgroup write handler: set the stored-watermark ratio (MIN..MAX_RATIO). */
int mem_cgroup_stored_wm_ratio_write(
	struct cgroup_subsys_state *css, struct cftype *cft, s64 val)
{
	if (val < MIN_RATIO || val > MAX_RATIO)
		return -EINVAL;
	if (!global_settings.stat)
		return -EINVAL;

	atomic64_set(&global_settings.stat->stored_wm_ratio, val);
	return 0;
}
s64 mem_cgroup_stored_wm_ratio_read(
struct cgroup_subsys_state *css, struct cftype *cft)
{
if (!global_settings.stat)
return -EINVAL;
return atomic64_read(&global_settings.stat->stored_wm_ratio);
}
/*
 * Report backing-store capacity and usage, in pages, through
 * @total/@used. Before initialisation both are reported as zero (still
 * success); -EINVAL only on NULL output pointers.
 */
int hybridswap_stored_info(unsigned long *total, unsigned long *used)
{
	if (!total || !used)
		return -EINVAL;

	if (!global_settings.stat || !global_settings.zram) {
		*used = 0;
		*total = 0;
		return 0;
	}

	*used = atomic64_read(&global_settings.stat->ext_cnt) * EXTENT_PG_CNT;
	*total = global_settings.zram->nr_pages;
	return 0;
}
/*
 * True when hybridswap is enabled and the fraction of backing store in
 * use is at or below the configured stored-watermark ratio.
 */
bool hybridswap_stored_wm_ok(void)
{
	unsigned long used_pages, total_pages;
	unsigned long pct, limit;

	if (!hybridswap_core_enabled())
		return false;
	if (hybridswap_stored_info(&total_pages, &used_pages))
		return false;

	/* +1 avoids dividing by zero when nothing is provisioned. */
	pct = (used_pages * 100) / (total_pages + 1);
	limit = atomic64_read(&global_settings.stat->stored_wm_ratio);
	return pct <= limit;
}
/* Programmatic enable: initialise subsystems, then flip the flag on. */
int hybridswap_core_enable(void)
{
	int err = hybridswap_set_enable_init(true);

	if (err) {
		hybp(HYB_ERR, "set true failed, ret=%d\n", err);
		return err;
	}

	hybridswap_set_enable(true);
	return 0;
}
/* Programmatic disable: best-effort init call (no-op for false), then
 * clear the flags.
 * NOTE(review): hybridswap_set_enable(false) only writes the core flag
 * while it is already clear, so an enabled core is never actually
 * disabled here — confirm intent. */
void hybridswap_core_disable(void)
{
(void)hybridswap_set_enable_init(false);
hybridswap_set_enable(false);
}

View File

@@ -1,561 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#ifndef HYBRIDSWAP_INTERNAL_H
#define HYBRIDSWAP_INTERNAL_H
#include <linux/sched.h>
#include <linux/zsmalloc.h>
#include <linux/timer.h>
#include <linux/device.h>
#include <linux/memcontrol.h>
#define EXTENT_SHIFT 15
#define EXTENT_SIZE (1UL << EXTENT_SHIFT)
#define EXTENT_PG_CNT (EXTENT_SIZE >> PAGE_SHIFT)
#define EXTENT_SECTOR_SIZE (EXTENT_PG_CNT << 3)
#define EXTENT_MAX_OBJ_CNT (30 * EXTENT_PG_CNT)
#define EXTENT_MASK (~(EXTENT_SIZE - 1))
#define EXTENT_ALIGN_UP(size) ((size + EXTENT_SIZE - 1) & EXTENT_MASK)
#define MAX_FAIL_RECORD_NUM 10
#define MEM_CGROUP_NAME_MAX_LEN 32
#define MAX_APP_SCORE 1000
#define HYBRIDSWAP_QUOTA_DAY 0x280000000 /* 10G bytes */
#define HYBRIDSWAP_CHECK_INTERVAL 86400 /* 24 hour */
#define MAX_RATIO 100
#define MIN_RATIO 0
enum {
HYB_ERR = 0,
HYB_WARN,
HYB_INFO,
HYB_DEBUG,
HYB_MAX
};
void hybridswap_loglevel_set(int level);
int hybridswap_loglevel(void);
#define DUMP_STACK_ON_ERR 0
#define pt(l, f, ...) pr_err("[%s]<%d:%s>:"f, #l, __LINE__, __func__, ##__VA_ARGS__)
static inline void pr_none(void) {}
#define hybp(l, f, ...) do {\
(l <= hybridswap_loglevel()) ? pt(l, f, ##__VA_ARGS__) : pr_none();\
if (DUMP_STACK_ON_ERR && l == HYB_ERR) dump_stack();\
} while (0)
enum hybridswap_scenario {
HYBRIDSWAP_RECLAIM_IN = 0,
HYBRIDSWAP_FAULT_OUT,
HYBRIDSWAP_BATCH_OUT,
HYBRIDSWAP_PRE_OUT,
HYBRIDSWAP_SCENARIO_BUTT
};
enum hybridswap_key_point {
HYBRIDSWAP_START = 0,
HYBRIDSWAP_INIT,
HYBRIDSWAP_IOENTRY_ALLOC,
HYBRIDSWAP_FIND_EXTENT,
HYBRIDSWAP_IO_EXTENT,
HYBRIDSWAP_SEGMENT_ALLOC,
HYBRIDSWAP_BIO_ALLOC,
HYBRIDSWAP_SUBMIT_BIO,
HYBRIDSWAP_END_IO,
HYBRIDSWAP_SCHED_WORK,
HYBRIDSWAP_END_WORK,
HYBRIDSWAP_CALL_BACK,
HYBRIDSWAP_WAKE_UP,
HYBRIDSWAP_ZRAM_LOCK,
HYBRIDSWAP_DONE,
HYBRIDSWAP_KYE_POINT_BUTT
};
enum hybridswap_mcg_member {
MCG_ZRAM_STORED_SZ = 0,
MCG_ZRAM_STORED_PG_SZ,
MCG_DISK_STORED_SZ,
MCG_DISK_STORED_PG_SZ,
MCG_ANON_FAULT_CNT,
MCG_DISK_FAULT_CNT,
MCG_ESWAPOUT_CNT,
MCG_ESWAPOUT_SZ,
MCG_ESWAPIN_CNT,
MCG_ESWAPIN_SZ,
MCG_DISK_SPACE,
MCG_DISK_SPACE_PEAK,
};
enum hybridswap_fail_point {
HYBRIDSWAP_FAULT_OUT_INIT_FAIL = 0,
HYBRIDSWAP_FAULT_OUT_ENTRY_ALLOC_FAIL,
HYBRIDSWAP_FAULT_OUT_IO_ENTRY_PARA_FAIL,
HYBRIDSWAP_FAULT_OUT_SEGMENT_ALLOC_FAIL,
HYBRIDSWAP_FAULT_OUT_BIO_ALLOC_FAIL,
HYBRIDSWAP_FAULT_OUT_BIO_ADD_FAIL,
HYBRIDSWAP_FAULT_OUT_IO_FAIL,
HYBRIDSWAP_FAIL_POINT_BUTT
};
struct hybridswap_fail_record {
unsigned char task_comm[TASK_COMM_LEN];
enum hybridswap_fail_point point;
ktime_t time;
u32 index;
int ext_id;
};
struct hybridswap_fail_record_info {
int num;
spinlock_t lock;
struct hybridswap_fail_record record[MAX_FAIL_RECORD_NUM];
};
struct hybridswap_key_point_info {
unsigned int record_cnt;
unsigned int end_cnt;
ktime_t first_time;
ktime_t last_time;
s64 proc_total_time;
s64 proc_max_time;
unsigned long long last_ravg_sum;
unsigned long long proc_ravg_sum;
spinlock_t time_lock;
};
struct hybridswap_key_point_record {
struct timer_list lat_monitor;
unsigned long warning_threshold;
int page_cnt;
int segment_cnt;
int nice;
bool timeout_flag;
unsigned char task_comm[TASK_COMM_LEN];
struct task_struct *task;
enum hybridswap_scenario scenario;
struct hybridswap_key_point_info key_point[HYBRIDSWAP_KYE_POINT_BUTT];
};
struct hybridswap_lat_stat {
atomic64_t total_lat;
atomic64_t max_lat;
atomic64_t timeout_cnt;
};
struct hybridswap_fault_timeout_cnt{
atomic64_t timeout_100ms_cnt;
atomic64_t timeout_500ms_cnt;
};
struct hybridswap_stat {
atomic64_t reclaimin_cnt;
atomic64_t reclaimin_bytes;
atomic64_t reclaimin_real_load;
atomic64_t reclaimin_bytes_daily;
atomic64_t reclaimin_pages;
atomic64_t reclaimin_infight;
atomic64_t batchout_cnt;
atomic64_t batchout_bytes;
atomic64_t batchout_real_load;
atomic64_t batchout_pages;
atomic64_t batchout_inflight;
atomic64_t fault_cnt;
atomic64_t hybridswap_fault_cnt;
atomic64_t reout_pages;
atomic64_t reout_bytes;
atomic64_t zram_stored_pages;
atomic64_t zram_stored_size;
atomic64_t stored_pages;
atomic64_t stored_size;
atomic64_t notify_free;
atomic64_t frag_cnt;
atomic64_t mcg_cnt;
atomic64_t ext_cnt;
atomic64_t miss_free;
atomic64_t mcgid_clear;
atomic64_t skip_track_cnt;
atomic64_t used_swap_pages;
atomic64_t null_memcg_skip_track_cnt;
atomic64_t stored_wm_ratio;
atomic64_t dropped_ext_size;
atomic64_t io_fail_cnt[HYBRIDSWAP_SCENARIO_BUTT];
atomic64_t alloc_fail_cnt[HYBRIDSWAP_SCENARIO_BUTT];
struct hybridswap_lat_stat lat[HYBRIDSWAP_SCENARIO_BUTT];
struct hybridswap_fault_timeout_cnt fault_stat[2]; /* 0:bg 1:fg */
struct hybridswap_fail_record_info record;
};
struct hybridswap_page_pool {
struct list_head page_pool_list;
spinlock_t page_pool_lock;
};
struct io_extent {
int ext_id;
struct zram *zram;
struct mem_cgroup *mcg;
struct page *pages[EXTENT_PG_CNT];
u32 index[EXTENT_MAX_OBJ_CNT];
int cnt;
int real_load;
struct hybridswap_page_pool *pool;
};
struct hybridswap_buffer {
struct zram *zram;
struct hybridswap_page_pool *pool;
struct page **dest_pages;
};
struct hybridswap_entry {
int ext_id;
sector_t addr;
struct page **dest_pages;
int pages_sz;
struct list_head list;
void *private;
void *manager_private;
};
struct hybridswap_io_req;
struct hybridswap_io {
struct block_device *bdev;
enum hybridswap_scenario scenario;
void (*done_callback)(struct hybridswap_entry *, int, struct hybridswap_io_req *);
void (*complete_notify)(void *);
void *private;
struct hybridswap_key_point_record *record;
};
struct hybridswap_io_req {
struct hybridswap_io io_para;
struct kref refcount;
struct mutex refmutex;
struct wait_queue_head io_wait;
atomic_t extent_inflight;
struct completion io_end_flag;
struct hybridswap_segment *segment;
bool limit_inflight_flag;
bool wait_io_finish_flag;
int page_cnt;
int segment_cnt;
int nice;
atomic64_t real_load;
};
/* Change hybridswap_event_item, you should change swapd_text togather*/
enum hybridswap_event_item {
#ifdef CONFIG_HYBRIDSWAP_SWAPD
SWAPD_WAKEUP,
SWAPD_REFAULT,
SWAPD_MEDIUM_PRESS,
SWAPD_CRITICAL_PRESS,
SWAPD_MEMCG_RATIO_SKIP,
SWAPD_MEMCG_REFAULT_SKIP,
SWAPD_SHRINK_ANON,
SWAPD_SWAPOUT,
SWAPD_SKIP_SWAPOUT,
SWAPD_EMPTY_ROUND,
SWAPD_OVER_MIN_BUFFER_SKIP_TIMES,
SWAPD_EMPTY_ROUND_SKIP_TIMES,
SWAPD_SNAPSHOT_TIMES,
SWAPD_SKIP_SHRINK_OF_WINDOW,
SWAPD_MANUAL_PAUSE,
#ifdef CONFIG_OPLUS_JANK
SWAPD_CPU_BUSY_SKIP_TIMES,
SWAPD_CPU_BUSY_BREAK_TIMES,
#endif
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
AKCOMPRESSD_WAKEUP,
#endif
NR_EVENT_ITEMS
};
struct swapd_event_state {
unsigned long event[NR_EVENT_ITEMS];
};
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
struct cgroup_cache_page {
spinlock_t lock;
struct list_head head;
unsigned int cnt;
int id;
char compressing;
char dead;
};
#endif
typedef struct mem_cgroup_hybridswap {
#ifdef CONFIG_HYBRIDSWAP
atomic64_t ub_ufs2zram_ratio;
atomic_t ub_zram2ufs_ratio;
atomic64_t app_score;
atomic64_t app_uid;
struct list_head score_node;
char name[MEM_CGROUP_NAME_MAX_LEN];
struct zram *zram;
struct mem_cgroup *memcg;
refcount_t usage;
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
atomic_t ub_mem2zram_ratio;
atomic_t refault_threshold;
unsigned long long reclaimed_pagefault;
long long can_reclaimed;
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
unsigned long zram_lru;
unsigned long ext_lru;
struct list_head link_list;
spinlock_t zram_init_lock;
long long can_eswaped;
atomic64_t zram_stored_size;
atomic64_t zram_page_size;
unsigned long zram_watermark;
atomic_t hybridswap_extcnt;
atomic_t hybridswap_peakextcnt;
atomic64_t hybridswap_stored_pages;
atomic64_t hybridswap_stored_size;
atomic64_t hybridswap_ext_notify_free;
atomic64_t hybridswap_outcnt;
atomic64_t hybridswap_incnt;
atomic64_t hybridswap_allfaultcnt;
atomic64_t hybridswap_faultcnt;
atomic64_t hybridswap_outextcnt;
atomic64_t hybridswap_inextcnt;
struct mutex swap_lock;
bool in_swapin;
bool force_swapout;
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
struct cgroup_cache_page cache;
#endif
}memcg_hybs_t;
#define MEMCGRP_ITEM_DATA(memcg) ((memcg_hybs_t *)(memcg)->android_oem_data1)
#define MEMCGRP_ITEM(memcg, item) (MEMCGRP_ITEM_DATA(memcg)->item)
extern void __put_memcg_cache(memcg_hybs_t *hybs);
/* Take an extra reference on @hybs; returns @hybs for chaining. */
static inline memcg_hybs_t *get_memcg_cache(memcg_hybs_t *hybs)
{
refcount_inc(&hybs->usage);
return hybs;
}
/* Drop a reference; the last put frees via __put_memcg_cache(). */
static inline void put_memcg_cache(memcg_hybs_t *hybs)
{
if (refcount_dec_and_test(&hybs->usage))
__put_memcg_cache(hybs);
}
DECLARE_PER_CPU(struct swapd_event_state, swapd_event_states);
extern struct mutex reclaim_para_lock;
/* Bump a swapd event counter using raw per-cpu ops — presumably for
 * callers that already run with preemption disabled; confirm. */
static inline void __count_swapd_event(enum hybridswap_event_item item)
{
raw_cpu_inc(swapd_event_states.event[item]);
}
/* Bump a swapd event counter (preemption-safe this_cpu op). */
static inline void count_swapd_event(enum hybridswap_event_item item)
{
this_cpu_inc(swapd_event_states.event[item]);
}
/* Add @delta to a swapd event counter (raw per-cpu variant). */
static inline void __count_swapd_events(enum hybridswap_event_item item, long delta)
{
raw_cpu_add(swapd_event_states.event[item], delta);
}
/* Add @delta to a swapd event counter (preemption-safe this_cpu op). */
static inline void count_swapd_events(enum hybridswap_event_item item, long delta)
{
this_cpu_add(swapd_event_states.event[item], delta);
}
void *hybridswap_malloc(size_t size, bool fast, bool nofail);
void hybridswap_free(const void *mem);
unsigned long hybridswap_zsmalloc(struct zs_pool *zs_pool,
size_t size, struct hybridswap_page_pool *pool);
struct page *hybridswap_alloc_page(
struct hybridswap_page_pool *pool, gfp_t gfp,
bool fast, bool nofail);
void hybridswap_page_recycle(struct page *page,
struct hybridswap_page_pool *pool);
struct hybridswap_stat *hybridswap_get_stat_obj(void);
int hybridswap_manager_init(struct zram *zram);
void hybridswap_manager_memcg_init(struct zram *zram,
struct mem_cgroup *memcg);
void hybridswap_manager_memcg_deinit(struct mem_cgroup *mcg);
void hybridswap_zram_lru_add(struct zram *zram, u32 index,
struct mem_cgroup *memcg);
void hybridswap_zram_lru_del(struct zram *zram, u32 index);
unsigned long hybridswap_extent_create(struct mem_cgroup *memcg,
int *ext_id,
struct hybridswap_buffer *dest_buf,
void **private);
void hybridswap_extent_register(void *private, struct hybridswap_io_req *req);
void hybridswap_extent_objs_del(struct zram *zram, u32 index);
int hybridswap_find_extent_by_idx(
unsigned long eswpentry, struct hybridswap_buffer *buf, void **private);
int hybridswap_find_extent_by_memcg(
struct mem_cgroup *mcg,
struct hybridswap_buffer *dest_buf, void **private);
void hybridswap_extent_destroy(void *private, enum hybridswap_scenario scenario);
void hybridswap_extent_exception(enum hybridswap_scenario scenario,
void *private);
void hybridswap_manager_deinit(struct zram *zram);
struct mem_cgroup *hybridswap_zram_get_memcg(struct zram *zram, u32 index);
int hybridswap_schedule_init(void);
void *hybridswap_plug_start(struct hybridswap_io *io_para);
int hybridswap_read_extent(void *io_handler,
struct hybridswap_entry *io_entry);
int hybridswap_write_extent(void *io_handler,
struct hybridswap_entry *io_entry);
int hybridswap_plug_finish(void *io_handler);
void hybridswap_perf_start(
struct hybridswap_key_point_record *record,
ktime_t stsrt, unsigned long long start_ravg_sum,
enum hybridswap_scenario scenario);
void hybridswap_perf_end(struct hybridswap_key_point_record *record);
void hybridswap_perf_lat_start(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type);
void hybridswap_perf_lat_end(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type);
void hybridswap_perf_lat_point(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type);
void hybridswap_perf_async_perf(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type, ktime_t start,
unsigned long long start_ravg_sum);
void hybridswap_perf_io_stat(
struct hybridswap_key_point_record *record, int page_cnt,
int segment_cnt);
/* Stubbed out: running-average load accounting is disabled, always 0. */
static inline unsigned long long hybridswap_get_ravg_sum(void)
{
return 0;
}
void hybridswap_fail_record(enum hybridswap_fail_point point,
u32 index, int ext_id, unsigned char *task_comm);
/* Reclaim life-protection gate and the dedicated reclaim workqueue. */
bool hybridswap_reach_life_protect(void);
struct workqueue_struct *hybridswap_get_reclaim_workqueue(void);
/*
 * Memcg iteration ordered by app score; get_next_memcg_break() releases
 * the iterator's reference when the walk stops early.
 */
extern struct mem_cgroup *get_next_memcg(struct mem_cgroup *prev);
extern void get_next_memcg_break(struct mem_cgroup *prev);
extern memcg_hybs_t *hybridswap_cache_alloc(struct mem_cgroup *memcg, bool atomic);
extern void memcg_app_score_resort(void);
extern unsigned long memcg_anon_pages(struct mem_cgroup *memcg);
#ifdef CONFIG_HYBRIDSWAP_CORE
/* Real implementations live in the hybridswap core module. */
extern bool hybridswap_core_enabled(void);
extern bool hybridswap_reclaim_in_enable(void);
extern void hybridswap_mem_cgroup_deinit(struct mem_cgroup *memcg);
extern unsigned long hybridswap_reclaim_in(unsigned long size);
extern int hybridswap_batch_out(struct mem_cgroup *mcg,
		unsigned long size, bool preload);
extern unsigned long zram_zsmalloc(struct zs_pool *zs_pool,
		size_t size, gfp_t gfp);
extern struct task_struct *get_task_from_proc(struct inode *inode);
extern unsigned long hybridswap_get_zram_used_pages(void);
extern unsigned long long hybridswap_get_zram_pagefault(void);
extern bool hybridswap_reclaim_work_running(void);
extern void hybridswap_force_reclaim(struct mem_cgroup *mcg);
extern bool hybridswap_stored_wm_ok(void);
extern void mem_cgroup_id_remove_hook(void *data, struct mem_cgroup *memcg);
extern int mem_cgroup_stored_wm_ratio_write(
		struct cgroup_subsys_state *css, struct cftype *cft, s64 val);
extern s64 mem_cgroup_stored_wm_ratio_read(
		struct cgroup_subsys_state *css, struct cftype *cft);
extern bool hybridswap_delete(struct zram *zram, u32 index);
extern int hybridswap_stored_info(unsigned long *total, unsigned long *used);
extern unsigned long long hybridswap_read_mcg_stats(
		struct mem_cgroup *mcg, enum hybridswap_mcg_member mcg_member);
extern int hybridswap_core_enable(void);
extern void hybridswap_core_disable(void);
extern int hybridswap_psi_show(struct seq_file *m, void *v);
#else
/*
 * CONFIG_HYBRIDSWAP_CORE disabled: inert stubs so callers can stay
 * free of #ifdefs.  All report "disabled / nothing stored".
 */
static inline unsigned long long hybridswap_read_mcg_stats(
		struct mem_cgroup *mcg, enum hybridswap_mcg_member mcg_member)
{
	return 0;
}
static inline unsigned long long hybridswap_get_zram_pagefault(void)
{
	return 0;
}
static inline unsigned long hybridswap_get_zram_used_pages(void)
{
	return 0;
}
static inline bool hybridswap_reclaim_work_running(void)
{
	return false;
}
static inline bool hybridswap_core_enabled(void) { return false; }
static inline bool hybridswap_reclaim_in_enable(void) { return false; }
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
/* swapd: background daemon that reclaims into zram on memory pressure. */
extern atomic_long_t fault_out_pause;
extern atomic_long_t fault_out_pause_cnt;
extern struct cftype mem_cgroup_swapd_legacy_files[];
extern bool zram_watermark_ok(void);
extern void wake_all_swapd(void);
extern void alloc_pages_slowpath_hook(void *data, gfp_t gfp_mask,
		unsigned int order, unsigned long delta);
extern void rmqueue_hook(void *data, struct zone *preferred_zone,
		struct zone *zone, unsigned int order, gfp_t gfp_flags,
		unsigned int alloc_flags, int migratetype);
extern void __init swapd_pre_init(void);
extern void swapd_pre_deinit(void);
extern void update_swapd_memcg_param(struct mem_cgroup *memcg);
extern bool free_zram_is_ok(void);
extern unsigned long get_nr_zram_total(void);
extern int swapd_init(struct zram *zram);
extern void swapd_exit(void);
extern bool hybridswap_swapd_enabled(void);
#else
static inline bool hybridswap_swapd_enabled(void) { return false; }
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
/* Async-compress path: deferred compression via per-zram kthreads. */
extern spinlock_t cached_idr_lock;
extern struct idr cached_idr;
extern void __init akcompressd_pre_init(void);
extern void __exit akcompressd_pre_deinit(void);
extern int create_akcompressd_task(struct zram *zram);
extern void clear_page_memcg(struct cgroup_cache_page *cache);
#endif
#endif /* end of HYBRIDSWAP_INTERNAL_H */

View File

@@ -1,323 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
#include <linux/zsmalloc.h>
#include "hybridswap_list.h"
#include "hybridswap_internal.h"
/*
 * Default node lookup: treat @private as a flat array of
 * struct hyb_list_head and index into it directly.
 */
static struct hyb_list_head *get_node_default(int idx, void *private)
{
	struct hyb_list_head *nodes = private;

	return nodes + idx;
}
/*
 * alloc_table - allocate a list table descriptor.
 * @get_node: node lookup callback; NULL selects the flat-array default.
 * @private:  opaque cookie handed back to @get_node on every lookup.
 * @gfp:      allocation flags for the descriptor itself.
 *
 * Returns the new table, or NULL on allocation failure.
 */
struct hyb_list_table *alloc_table(struct hyb_list_head *(*get_node)(int, void *),
		void *private, gfp_t gfp)
{
	struct hyb_list_table *tab;

	tab = kmalloc(sizeof(*tab), gfp);
	if (!tab)
		return NULL;

	tab->private = private;
	tab->get_node = get_node ? get_node : get_node_default;

	return tab;
}
/*
 * hh_lock_list - take the per-node lock for index @idx.
 * The lock is a bit spinlock stored inside the packed hyb_list_head
 * word itself (bit HH_LIST_LOCK_BIT), so no separate lock array exists.
 */
void hh_lock_list(int idx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = idx_node(idx, table);

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pK\n", idx, table);
		return;
	}
	bit_spin_lock(HH_LIST_LOCK_BIT, (unsigned long *)node);
}

/* hh_unlock_list - release the bit spinlock taken by hh_lock_list(). */
void hh_unlock_list(int idx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = idx_node(idx, table);

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pK\n", idx, table);
		return;
	}
	bit_spin_unlock(HH_LIST_LOCK_BIT, (unsigned long *)node);
}

/*
 * hyb_list_empty - test whether the list headed at @hidx is empty,
 * i.e. the head links back to itself in both directions.  Takes the
 * head lock for a consistent read of prev/next.
 */
bool hyb_list_empty(int hidx, struct hyb_list_table *table)
{
	bool ret = false;

	hh_lock_list(hidx, table);
	ret = (prev_idx(hidx, table) == hidx) && (next_idx(hidx, table) == hidx);
	hh_unlock_list(hidx, table);
	return ret;
}

/*
 * hyb_list_init - reset node @idx to a self-linked (empty) state.
 * Clears all packed fields (mcg id, lock, priv) then points both
 * links at the node itself.  Caller must guarantee exclusivity; no
 * lock is taken here.
 */
void hyb_list_init(int idx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = idx_node(idx, table);

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pS func %pS\n",
			idx, table, table->get_node);
		return;
	}
	memset(node, 0, sizeof(struct hyb_list_head));
	node->prev = idx;
	node->next = idx;
}
/*
 * hyb_list_add_nolock - insert node @idx right after head @hidx.
 *
 * "nolock" means the caller already holds the head (@hidx) lock.  The
 * neighbour nodes are still locked individually while their links are
 * rewritten; the "idx != hidx" / "nidx != hidx" guards avoid taking the
 * already-held head lock twice (bit spinlocks are not recursive).
 */
void hyb_list_add_nolock(int idx, int hidx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = NULL;
	struct hyb_list_head *head = NULL;
	struct hyb_list_head *next = NULL;
	int nidx;

	node = idx_node(idx, table);
	if (!node) {
		hybp(HYB_ERR,
			"NULL node, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	head = idx_node(hidx, table);
	if (!head) {
		hybp(HYB_ERR,
			"NULL head, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	next = idx_node(head->next, table);
	if (!next) {
		hybp(HYB_ERR,
			"NULL next, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	nidx = head->next;
	/* link the new node between head and its old successor */
	if (idx != hidx)
		hh_lock_list(idx, table);
	node->prev = hidx;
	node->next = nidx;
	if (idx != hidx)
		hh_unlock_list(idx, table);
	head->next = idx;
	if (nidx != hidx)
		hh_lock_list(nidx, table);
	next->prev = idx;
	if (nidx != hidx)
		hh_unlock_list(nidx, table);
}

/*
 * hyb_list_add_tail_nolock - insert node @idx just before head @hidx
 * (i.e. at the list tail).  Locking contract identical to
 * hyb_list_add_nolock(): caller holds the head lock.
 */
void hyb_list_add_tail_nolock(int idx, int hidx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = NULL;
	struct hyb_list_head *head = NULL;
	struct hyb_list_head *tail = NULL;
	int tidx;

	node = idx_node(idx, table);
	if (!node) {
		hybp(HYB_ERR,
			"NULL node, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	head = idx_node(hidx, table);
	if (!head) {
		hybp(HYB_ERR,
			"NULL head, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	tail = idx_node(head->prev, table);
	if (!tail) {
		hybp(HYB_ERR,
			"NULL tail, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	tidx = head->prev;
	/* link the new node between the old tail and the head */
	if (idx != hidx)
		hh_lock_list(idx, table);
	node->prev = tidx;
	node->next = hidx;
	if (idx != hidx)
		hh_unlock_list(idx, table);
	head->prev = idx;
	if (tidx != hidx)
		hh_lock_list(tidx, table);
	tail->next = idx;
	if (tidx != hidx)
		hh_unlock_list(tidx, table);
}

/*
 * hyb_list_del_nolock - unlink node @idx from the list headed at @hidx
 * and leave it self-linked.  Caller holds the head lock; the node and
 * its neighbours are locked individually unless they are the head.
 */
void hyb_list_del_nolock(int idx, int hidx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = NULL;
	struct hyb_list_head *prev = NULL;
	struct hyb_list_head *next = NULL;
	int pidx, nidx;

	node = idx_node(idx, table);
	if (!node) {
		hybp(HYB_ERR,
			"NULL node, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	prev = idx_node(node->prev, table);
	if (!prev) {
		hybp(HYB_ERR,
			"NULL prev, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	next = idx_node(node->next, table);
	if (!next) {
		hybp(HYB_ERR,
			"NULL next, idx = %d, hidx = %d, table = %pK\n",
			idx, hidx, table);
		return;
	}
	if (idx != hidx)
		hh_lock_list(idx, table);
	pidx = node->prev;
	nidx = node->next;
	/* self-link the removed node so later ops see it as empty */
	node->prev = idx;
	node->next = idx;
	if (idx != hidx)
		hh_unlock_list(idx, table);
	if (pidx != hidx)
		hh_lock_list(pidx, table);
	prev->next = nidx;
	if (pidx != hidx)
		hh_unlock_list(pidx, table);
	if (nidx != hidx)
		hh_lock_list(nidx, table);
	next->prev = pidx;
	if (nidx != hidx)
		hh_unlock_list(nidx, table);
}
/* hyb_list_add - locked front-insert: takes the head lock, then delegates. */
void hyb_list_add(int idx, int hidx, struct hyb_list_table *table)
{
	hh_lock_list(hidx, table);
	hyb_list_add_nolock(idx, hidx, table);
	hh_unlock_list(hidx, table);
}

/* hyb_list_add_tail - locked tail-insert wrapper. */
void hyb_list_add_tail(int idx, int hidx, struct hyb_list_table *table)
{
	hh_lock_list(hidx, table);
	hyb_list_add_tail_nolock(idx, hidx, table);
	hh_unlock_list(hidx, table);
}

/* hyb_list_del - locked unlink wrapper. */
void hyb_list_del(int idx, int hidx, struct hyb_list_table *table)
{
	hh_lock_list(hidx, table);
	hyb_list_del_nolock(idx, hidx, table);
	hh_unlock_list(hidx, table);
}

/*
 * hyb_list_get_mcgid - read the memcg id packed into node @idx.
 * The id is split across two bitfields (mcg_hi/mcg_lo) to fit the
 * packed layout; reassemble it under the node lock.
 * Returns 0 when the node cannot be resolved.
 */
unsigned short hyb_list_get_mcgid(int idx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = idx_node(idx, table);
	int mcg_id;

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pK\n", idx, table);
		return 0;
	}
	hh_lock_list(idx, table);
	mcg_id = (node->mcg_hi << HH_LIST_MCG_SHIFT_HALF) | node->mcg_lo;
	hh_unlock_list(idx, table);
	return mcg_id;
}

/* hyb_list_set_mcgid - store @mcg_id into the split hi/lo bitfields. */
void hyb_list_set_mcgid(int idx, struct hyb_list_table *table, int mcg_id)
{
	struct hyb_list_head *node = idx_node(idx, table);

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pK, mcg = %d\n",
			idx, table, mcg_id);
		return;
	}
	hh_lock_list(idx, table);
	node->mcg_hi = (u32)mcg_id >> HH_LIST_MCG_SHIFT_HALF;
	node->mcg_lo = (u32)mcg_id & ((1 << HH_LIST_MCG_SHIFT_HALF) - 1);
	hh_unlock_list(idx, table);
}

/*
 * hyb_list_set_priv - atomically set the node's private flag.
 * Returns true only if this caller flipped it (it was clear before).
 */
bool hyb_list_set_priv(int idx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = idx_node(idx, table);
	bool ret = false;

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pK\n", idx, table);
		return false;
	}
	hh_lock_list(idx, table);
	ret = !test_and_set_bit(HH_LIST_PRIV_BIT, (unsigned long *)node);
	hh_unlock_list(idx, table);
	return ret;
}

/* hyb_list_test_priv - read the private flag under the node lock. */
bool hyb_list_test_priv(int idx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = idx_node(idx, table);
	bool ret = false;

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pK\n", idx, table);
		return false;
	}
	hh_lock_list(idx, table);
	ret = test_bit(HH_LIST_PRIV_BIT, (unsigned long *)node);
	hh_unlock_list(idx, table);
	return ret;
}

/*
 * hyb_list_clear_priv - atomically clear the private flag.
 * Returns true if it was set before clearing.
 */
bool hyb_list_clear_priv(int idx, struct hyb_list_table *table)
{
	struct hyb_list_head *node = idx_node(idx, table);
	bool ret = false;

	if (!node) {
		hybp(HYB_ERR, "idx = %d, table = %pK\n", idx, table);
		return false;
	}
	hh_lock_list(idx, table);
	ret = test_and_clear_bit(HH_LIST_PRIV_BIT, (unsigned long *)node);
	hh_unlock_list(idx, table);
	return ret;
}

View File

@@ -1,67 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#ifndef _HYBRIDSWAP_LIST_H_
#define _HYBRIDSWAP_LIST_H_
/*
 * Index-based doubly-linked list packed into two 32-bit words.
 * prev/next are 23-bit node indices, the memcg id is split into two
 * 8-bit halves (mcg_hi/mcg_lo), and one bit each serves as the node's
 * bit spinlock and a private flag.
 */
#define HH_LIST_PTR_SHIFT 23
#define HH_LIST_MCG_SHIFT_HALF 8
/* lock bit position: immediately after mcg_hi in the first word */
#define HH_LIST_LOCK_BIT HH_LIST_MCG_SHIFT_HALF
/* priv bit position: after mcg_hi + prev + mcg_lo in the pair */
#define HH_LIST_PRIV_BIT (HH_LIST_PTR_SHIFT + HH_LIST_MCG_SHIFT_HALF + \
		HH_LIST_MCG_SHIFT_HALF + 1)
struct hyb_list_head {
	unsigned int mcg_hi : HH_LIST_MCG_SHIFT_HALF;
	unsigned int lock : 1;
	unsigned int prev : HH_LIST_PTR_SHIFT;
	unsigned int mcg_lo : HH_LIST_MCG_SHIFT_HALF;
	unsigned int priv : 1;
	unsigned int next : HH_LIST_PTR_SHIFT;
};
/* Table = node lookup callback + opaque cookie; see alloc_table(). */
struct hyb_list_table {
	struct hyb_list_head *(*get_node)(int, void *);
	void *private;
};
/* Resolve an index to its node / its neighbours' indices. */
#define idx_node(idx, tab) ((tab)->get_node((idx), (tab)->private))
#define next_idx(idx, tab) (idx_node((idx), (tab))->next)
#define prev_idx(idx, tab) (idx_node((idx), (tab))->prev)
#define is_last_idx(idx, hidx, tab) (next_idx(idx, tab) == (hidx))
#define is_first_idx(idx, hidx, tab) (prev_idx(idx, tab) == (hidx))
struct hyb_list_table *alloc_table(struct hyb_list_head *(*get_node)(int, void *),
		void *private, gfp_t gfp);
void hh_lock_list(int idx, struct hyb_list_table *table);
void hh_unlock_list(int idx, struct hyb_list_table *table);
void hyb_list_init(int idx, struct hyb_list_table *table);
/* _nolock variants require the caller to hold the head (hidx) lock. */
void hyb_list_add_nolock(int idx, int hidx, struct hyb_list_table *table);
void hyb_list_add_tail_nolock(int idx, int hidx, struct hyb_list_table *table);
void hyb_list_del_nolock(int idx, int hidx, struct hyb_list_table *table);
void hyb_list_add(int idx, int hidx, struct hyb_list_table *table);
void hyb_list_add_tail(int idx, int hidx, struct hyb_list_table *table);
void hyb_list_del(int idx, int hidx, struct hyb_list_table *table);
unsigned short hyb_list_get_mcgid(int idx, struct hyb_list_table *table);
void hyb_list_set_mcgid(int idx, struct hyb_list_table *table, int mcg_id);
bool hyb_list_set_priv(int idx, struct hyb_list_table *table);
bool hyb_list_clear_priv(int idx, struct hyb_list_table *table);
bool hyb_list_test_priv(int idx, struct hyb_list_table *table);
bool hyb_list_empty(int hidx, struct hyb_list_table *table);
/*
 * Iteration helpers; none of these take locks, and the _safe forms
 * cache the next index so the current node may be deleted mid-walk.
 */
#define hyb_list_for_each_entry(idx, hidx, tab) \
	for ((idx) = next_idx((hidx), (tab)); \
		(idx) != (hidx); (idx) = next_idx((idx), (tab)))
#define hyb_list_for_each_entry_safe(idx, tmp, hidx, tab) \
	for ((idx) = next_idx((hidx), (tab)), (tmp) = next_idx((idx), (tab)); \
		(idx) != (hidx); (idx) = (tmp), (tmp) = next_idx((idx), (tab)))
#define hyb_list_for_each_entry_reverse(idx, hidx, tab) \
	for ((idx) = prev_idx((hidx), (tab)); \
		(idx) != (hidx); (idx) = prev_idx((idx), (tab)))
#define hyb_list_for_each_entry_reverse_safe(idx, tmp, hidx, tab) \
	for ((idx) = prev_idx((hidx), (tab)), (tmp) = prev_idx((idx), (tab)); \
		(idx) != (hidx); (idx) = (tmp), (tmp) = prev_idx((idx), (tab)))
#endif /* _HYBRIDSWAP_LIST_H_ */

View File

@@ -1,293 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/kernel.h>
#include <linux/swap.h>
#include "../zram_drv.h"
#include "../zram_drv_internal.h"
#include "hybridswap.h"
#include "hybridswap_internal.h"
#include "hybridswap_list.h"
#include "hybridswap_area.h"
#include "hybridswap_lru_rmap.h"
#define esentry_extid(e) ((e) >> EXTENT_SHIFT)
/* zram_set_memcg - tag zram slot @index with owning memcg id @mcg_id. */
void zram_set_memcg(struct zram *zram, u32 index, int mcg_id)
{
	hyb_list_set_mcgid(obj_idx(zram->area, index),
		zram->area->obj_table, mcg_id);
}

/*
 * zram_get_memcg - resolve the memcg that owns zram slot @index.
 * Looks the packed id back up via get_mem_cgroup(); may return NULL
 * if the id no longer maps to a live memcg (resolution is up to the
 * helper — NOTE(review): confirm get_mem_cgroup()'s NULL semantics).
 */
struct mem_cgroup *zram_get_memcg(struct zram *zram, u32 index)
{
	unsigned short mcg_id;

	mcg_id = hyb_list_get_mcgid(obj_idx(zram->area, index),
		zram->area->obj_table);
	return get_mem_cgroup(mcg_id);
}

/*
 * zram_get_memcg_coldest_index - collect up to @max_cnt object indices
 * from the cold end (reverse walk) of @mcg's LRU list into @index.
 * Returns the number of indices written; 0 on any invalid argument.
 */
int zram_get_memcg_coldest_index(struct hybridswap_area *area,
		struct mem_cgroup *mcg,
		int *index, int max_cnt)
{
	int cnt = 0;
	u32 i, tmp;

	if (!area) {
		hybp(HYB_ERR, "NULL area\n");
		return 0;
	}
	if (!area->obj_table) {
		hybp(HYB_ERR, "NULL table\n");
		return 0;
	}
	if (!mcg) {
		hybp(HYB_ERR, "NULL mcg\n");
		return 0;
	}
	if (!index) {
		hybp(HYB_ERR, "NULL index\n");
		return 0;
	}
	hh_lock_list(mcg_idx(area, mcg->id.id), area->obj_table);
	/* reverse walk = coldest first; _safe so bad entries can be skipped */
	hyb_list_for_each_entry_reverse_safe(i, tmp,
		mcg_idx(area, mcg->id.id), area->obj_table) {
		if (i >= (u32)area->nr_objs) {
			hybp(HYB_ERR, "index = %d invalid\n", i);
			continue;
		}
		index[cnt++] = i;
		if (cnt >= max_cnt)
			break;
	}
	hh_unlock_list(mcg_idx(area, mcg->id.id), area->obj_table);
	return cnt;
}

/*
 * zram_rmap_get_extent_index - gather every object index linked to
 * extent @ext_id into @index (reverse map lookup).  @index must hold
 * at least EXTENT_MAX_OBJ_CNT entries; the walk warns and stops if
 * the list somehow exceeds that.  Returns the count collected.
 */
int zram_rmap_get_extent_index(struct hybridswap_area *area,
		int ext_id, int *index)
{
	int cnt = 0;
	u32 i;

	if (!area) {
		hybp(HYB_ERR, "NULL area\n");
		return 0;
	}
	if (!area->obj_table) {
		hybp(HYB_ERR, "NULL table\n");
		return 0;
	}
	if (!index) {
		hybp(HYB_ERR, "NULL index\n");
		return 0;
	}
	if (ext_id < 0 || ext_id >= area->nr_exts) {
		hybp(HYB_ERR, "ext = %d invalid\n", ext_id);
		return 0;
	}
	hh_lock_list(ext_idx(area, ext_id), area->obj_table);
	hyb_list_for_each_entry(i, ext_idx(area, ext_id), area->obj_table) {
		if (cnt >= (int)EXTENT_MAX_OBJ_CNT) {
			WARN_ON_ONCE(1);
			break;
		}
		index[cnt++] = i;
	}
	hh_unlock_list(ext_idx(area, ext_id), area->obj_table);
	return cnt;
}
/*
 * zram_lru_add - link zram slot @index at the hot end of @memcg's LRU
 * and account its compressed size in both per-memcg and global stats.
 *
 * Skips WB, CACHED/CACHED_COMPRESS (async-compress config) and SAME
 * pages: those either live elsewhere or carry no stored data.
 *
 * Fix: validate @memcg before dereferencing memcg->id.id.  The sibling
 * zram_lru_add_tail() already guards against a NULL/uninitialized memcg
 * (!mcg || !MEMCGRP_ITEM(mcg, zram) || !...->area); this path used
 * memcg unchecked, risking a NULL dereference.
 */
void zram_lru_add(struct zram *zram, u32 index, struct mem_cgroup *memcg)
{
	unsigned long size;
	struct hybridswap_stat *stat = hybridswap_get_stat_obj();

	if (!stat) {
		hybp(HYB_ERR, "NULL stat\n");
		return;
	}
	if (!zram) {
		hybp(HYB_ERR, "NULL zram\n");
		return;
	}
	/* same validity check as zram_lru_add_tail() */
	if (!memcg || !MEMCGRP_ITEM(memcg, zram) ||
			!MEMCGRP_ITEM(memcg, zram)->area) {
		hybp(HYB_ERR, "invalid mcg\n");
		return;
	}
	if (index >= (u32)zram->area->nr_objs) {
		hybp(HYB_ERR, "index = %d invalid\n", index);
		return;
	}
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		hybp(HYB_ERR, "WB object, index = %d\n", index);
		return;
	}
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
	if (zram_test_flag(zram, index, ZRAM_CACHED)) {
		hybp(HYB_ERR, "CACHED object, index = %d\n", index);
		return;
	}
	if (zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
		hybp(HYB_ERR, "CACHED_COMPRESS object, index = %d\n", index);
		return;
	}
#endif
	/* SAME pages store no payload; nothing to track on the LRU */
	if (zram_test_flag(zram, index, ZRAM_SAME))
		return;

	zram_set_memcg(zram, index, memcg->id.id);
	hyb_list_add(obj_idx(zram->area, index),
		mcg_idx(zram->area, memcg->id.id),
		zram->area->obj_table);

	size = zram_get_obj_size(zram, index);
	atomic64_add(size, &MEMCGRP_ITEM(memcg, zram_stored_size));
	atomic64_inc(&MEMCGRP_ITEM(memcg, zram_page_size));
	atomic64_add(size, &stat->zram_stored_size);
	atomic64_inc(&stat->zram_stored_pages);
}
/*
 * zram_lru_add_tail - link zram slot @index at the cold end of @mcg's
 * LRU and account its size.  Same skip rules as zram_lru_add(): WB,
 * CACHED/CACHED_COMPRESS and SAME pages are not tracked.
 */
void zram_lru_add_tail(struct zram *zram, u32 index, struct mem_cgroup *mcg)
{
	unsigned long size;
	struct hybridswap_stat *stat = hybridswap_get_stat_obj();

	if (!stat) {
		hybp(HYB_ERR, "NULL stat\n");
		return;
	}
	if (!zram) {
		hybp(HYB_ERR, "NULL zram\n");
		return;
	}
	if (!mcg || !MEMCGRP_ITEM(mcg, zram) || !MEMCGRP_ITEM(mcg, zram)->area) {
		hybp(HYB_ERR, "invalid mcg\n");
		return;
	}
	if (index >= (u32)zram->area->nr_objs) {
		hybp(HYB_ERR, "index = %d invalid\n", index);
		return;
	}
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		hybp(HYB_ERR, "WB object, index = %d\n", index);
		return;
	}
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
	if (zram_test_flag(zram, index, ZRAM_CACHED)) {
		hybp(HYB_ERR, "CACHED object, index = %d\n", index);
		return;
	}
	if (zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
		hybp(HYB_ERR, "CACHED_COMPRESS object, index = %d\n", index);
		return;
	}
#endif
	/* SAME pages store no payload; skip accounting */
	if (zram_test_flag(zram, index, ZRAM_SAME))
		return;
	zram_set_memcg(zram, index, mcg->id.id);
	hyb_list_add_tail(obj_idx(zram->area, index),
		mcg_idx(zram->area, mcg->id.id),
		zram->area->obj_table);
	size = zram_get_obj_size(zram, index);
	atomic64_add(size, &MEMCGRP_ITEM(mcg, zram_stored_size));
	atomic64_inc(&MEMCGRP_ITEM(mcg, zram_page_size));
	atomic64_add(size, &stat->zram_stored_size);
	atomic64_inc(&stat->zram_stored_pages);
}

/*
 * zram_lru_del - unlink slot @index from its owning memcg's LRU, clear
 * the memcg tag, and reverse the accounting done at add time.  A slot
 * whose memcg can no longer be resolved is silently left alone.
 */
void zram_lru_del(struct zram *zram, u32 index)
{
	struct mem_cgroup *mcg = NULL;
	unsigned long size;
	struct hybridswap_stat *stat = hybridswap_get_stat_obj();

	if (!stat) {
		hybp(HYB_ERR, "NULL stat\n");
		return;
	}
	if (!zram || !zram->area) {
		hybp(HYB_ERR, "NULL zram\n");
		return;
	}
	if (index >= (u32)zram->area->nr_objs) {
		hybp(HYB_ERR, "index = %d invalid\n", index);
		return;
	}
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		hybp(HYB_ERR, "WB object, index = %d\n", index);
		return;
	}
	mcg = zram_get_memcg(zram, index);
	if (!mcg || !MEMCGRP_ITEM(mcg, zram) || !MEMCGRP_ITEM(mcg, zram)->area)
		return;
	if (zram_test_flag(zram, index, ZRAM_SAME))
		return;
	size = zram_get_obj_size(zram, index);
	hyb_list_del(obj_idx(zram->area, index),
		mcg_idx(zram->area, mcg->id.id),
		zram->area->obj_table);
	zram_set_memcg(zram, index, 0);
	atomic64_sub(size, &MEMCGRP_ITEM(mcg, zram_stored_size));
	atomic64_dec(&MEMCGRP_ITEM(mcg, zram_page_size));
	atomic64_sub(size, &stat->zram_stored_size);
	atomic64_dec(&stat->zram_stored_pages);
}

/*
 * zram_rmap_insert - add slot @index to the reverse-map list of the
 * extent encoded in its handle (extent id lives above EXTENT_SHIFT).
 */
void zram_rmap_insert(struct zram *zram, u32 index)
{
	unsigned long eswpentry;
	u32 ext_id;

	if (!zram) {
		hybp(HYB_ERR, "NULL zram\n");
		return;
	}
	if (index >= (u32)zram->area->nr_objs) {
		hybp(HYB_ERR, "index = %d invalid\n", index);
		return;
	}
	eswpentry = zram_get_handle(zram, index);
	ext_id = esentry_extid(eswpentry);
	hyb_list_add_tail(obj_idx(zram->area, index),
		ext_idx(zram->area, ext_id),
		zram->area->obj_table);
}

/* zram_rmap_erase - remove slot @index from its extent's reverse map. */
void zram_rmap_erase(struct zram *zram, u32 index)
{
	unsigned long eswpentry;
	u32 ext_id;

	if (!zram) {
		hybp(HYB_ERR, "NULL zram\n");
		return;
	}
	if (index >= (u32)zram->area->nr_objs) {
		hybp(HYB_ERR, "index = %d invalid\n", index);
		return;
	}
	eswpentry = zram_get_handle(zram, index);
	ext_id = esentry_extid(eswpentry);
	hyb_list_del(obj_idx(zram->area, index),
		ext_idx(zram->area, ext_id),
		zram->area->obj_table);
}

View File

@@ -1,22 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#ifndef _HYBRIDSWAP_LRU_MAP_
#define _HYBRIDSWAP_LRU_MAP_
/*
 * Per-memcg LRU tracking and extent reverse-map for zram slots.
 * LRU: hot objects are added at the head, cold at the tail.
 * rmap: maps each on-disk extent back to the zram slots it holds.
 */
void zram_set_memcg(struct zram *zram, u32 index, int mcg_id);
struct mem_cgroup *zram_get_memcg(struct zram *zram, u32 index);
/* Collect up to @max_cnt coldest object indices of @mcg; returns count. */
int zram_get_memcg_coldest_index(struct hybridswap_area *area,
		struct mem_cgroup *mcg,
		int *index, int max_cnt);
/* Collect all object indices belonging to extent @ext_id; returns count. */
int zram_rmap_get_extent_index(struct hybridswap_area *area,
		int ext_id, int *index);
void zram_lru_add(struct zram *zram, u32 index, struct mem_cgroup *memcg);
void zram_lru_add_tail(struct zram *zram, u32 index, struct mem_cgroup *mcg);
void zram_lru_del(struct zram *zram, u32 index);
void zram_rmap_insert(struct zram *zram, u32 index);
void zram_rmap_erase(struct zram *zram, u32 index);
#endif /* _HYBRIDSWAP_LRU_MAP_ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,351 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/zsmalloc.h>
#include <linux/spinlock.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#ifdef CONFIG_FG_TASK_UID
#include <linux/healthinfo/fg.h>
#endif
#include "hybridswap_internal.h"
/* Size of the on-stack buffer used to format latency dump lines. */
#define DUMP_BUF_LEN 512
/*
 * Per-scenario slow-path warning thresholds in milliseconds;
 * 0 disables monitoring for that scenario.  Indexed by
 * enum hybridswap_scenario (reclaim_in, fault_out, batch_out, pre_out
 * per the comment in hybridswap_perf_lat_stat()).
 */
static unsigned long warning_threshold[HYBRIDSWAP_SCENARIO_BUTT] = {
	0, 200, 500, 0
};
/* Human-readable labels, indexed by enum hybridswap_key_point. */
const char *key_point_name[HYBRIDSWAP_KYE_POINT_BUTT] = {
	"START",
	"INIT",
	"IOENTRY_ALLOC",
	"FIND_EXTENT",
	"IO_EXTENT",
	"SEGMENT_ALLOC",
	"BIO_ALLOC",
	"SUBMIT_BIO",
	"END_IO",
	"SCHED_WORK",
	"END_WORK",
	"CALL_BACK",
	"WAKE_UP",
	"ZRAM_LOCK",
	"DONE"
};
/*
 * hybridswap_dump_point_lat - log per-key-point latency details for a
 * record: offset from @start, hit/end counts, accumulated processing
 * time and ravg sum.  Points that never recorded are skipped.
 */
static void hybridswap_dump_point_lat(
	struct hybridswap_key_point_record *record, ktime_t start)
{
	int i;

	for (i = 0; i < HYBRIDSWAP_KYE_POINT_BUTT; ++i) {
		if (!record->key_point[i].record_cnt)
			continue;
		hybp(HYB_ERR,
			"%s diff %lld cnt %u end %u lat %lld ravg_sum %llu\n",
			key_point_name[i],
			ktime_us_delta(record->key_point[i].first_time, start),
			record->key_point[i].record_cnt,
			record->key_point[i].end_cnt,
			record->key_point[i].proc_total_time,
			record->key_point[i].proc_ravg_sum);
	}
}
static void hybridswap_dump_no_record_point(
struct hybridswap_key_point_record *record, char *log,
unsigned int *count)
{
int i;
unsigned int point = 0;
for (i = 0; i < HYBRIDSWAP_KYE_POINT_BUTT; ++i)
if (record->key_point[i].record_cnt)
point = i;
point++;
if (point < HYBRIDSWAP_KYE_POINT_BUTT)
*count += snprintf(log + *count,
(size_t)(DUMP_BUF_LEN - *count),
" no_record_point %s", key_point_name[point]);
else
*count += snprintf(log + *count,
(size_t)(DUMP_BUF_LEN - *count), " all_point_record");
}
/*
 * hybridswap_calc_speed - convert @page_cnt pages moved in @time
 * microseconds into bits per second.  Returns 0 for zero pages and
 * S64_MAX when the elapsed time is zero (instantaneous).
 */
static long long hybridswap_calc_speed(s64 page_cnt, s64 time)
{
	s64 bits;

	if (!page_cnt)
		return 0;

	bits = page_cnt * PAGE_SIZE * BITS_PER_BYTE;
	if (!time)
		return S64_MAX;

	return bits * USEC_PER_SEC / time;
}
/*
 * hybridswap_dump_lat - emit a one-line latency summary plus per-point
 * breakdown and a stack dump.  With @perf_end_flag the I/O completed
 * and throughput is reported; otherwise the request is still running
 * and the task state is shown instead.
 *
 * Fix: snprintf() returns the would-be length, so blindly adding its
 * return to @count could push log + count past the 512-byte buffer on
 * the next append (out-of-bounds write).  Accumulate via a checked
 * intermediate and clamp to DUMP_BUF_LEN - 1 between appends.
 */
static void hybridswap_dump_lat(
	struct hybridswap_key_point_record *record, ktime_t curr_time,
	bool perf_end_flag)
{
	char log[DUMP_BUF_LEN] = { 0 };
	unsigned int count = 0;
	int written;
	ktime_t start;
	s64 total_time;

	start = record->key_point[HYBRIDSWAP_START].first_time;
	total_time = ktime_us_delta(curr_time, start);
	written = snprintf(log, sizeof(log),
		"totaltime(us) %lld scenario %u task %s nice %d",
		total_time, record->scenario, record->task_comm, record->nice);
	if (written > 0)
		count = min_t(unsigned int, written, DUMP_BUF_LEN - 1);
	if (perf_end_flag)
		written = snprintf(log + count, (size_t)(DUMP_BUF_LEN - count),
			" page %d segment %d speed(bps) %lld threshold %llu",
			record->page_cnt, record->segment_cnt,
			hybridswap_calc_speed(record->page_cnt, total_time),
			record->warning_threshold);
	else
		written = snprintf(log + count, (size_t)(DUMP_BUF_LEN - count),
			" state %c", task_state_to_char(record->task));
	if (written > 0)
		count = min_t(unsigned int, count + written, DUMP_BUF_LEN - 1);
	hybridswap_dump_no_record_point(record, log, &count);
	hybp(HYB_ERR, "perf end flag %u %s\n", perf_end_flag, log);
	hybridswap_dump_point_lat(record, start);
	dump_stack();
}
/*
 * hybridswap_perf_warning_threshold - per-scenario warning threshold
 * (ms) from the warning_threshold[] table; 0 = monitoring disabled.
 */
static unsigned long hybridswap_perf_warning_threshold(
	enum hybridswap_scenario scenario)
{
	if (unlikely(scenario >= HYBRIDSWAP_SCENARIO_BUTT))
		return 0;
	return warning_threshold[scenario];
}

/*
 * hybridswap_perf_warning - timer callback fired when an operation runs
 * past its threshold.  Dumps latency state (rate-limited to one dump
 * per 60s globally), quadruples the threshold as back-off, flags the
 * record as timed out, and re-arms the timer.
 */
void hybridswap_perf_warning(struct timer_list *t)
{
	struct hybridswap_key_point_record *record =
		from_timer(record, t, lat_monitor);
	static unsigned long last_dump_lat_jiffies = 0;

	if (!record->warning_threshold)
		return;
	/* global rate limit: at most one dump per minute */
	if (jiffies_to_msecs(jiffies - last_dump_lat_jiffies) <= 60000)
		return;
	hybridswap_dump_lat(record, ktime_get(), false);
	if (likely(record->task))
		sched_show_task(record->task);
	last_dump_lat_jiffies = jiffies;
	/* back off: next warning only after 4x the current threshold */
	record->warning_threshold <<= 2;
	record->timeout_flag = true;
	mod_timer(&record->lat_monitor,
		jiffies + msecs_to_jiffies(record->warning_threshold));
}

/*
 * hybridswap_perf_init_monitor - arm the slow-path watchdog timer for
 * this record.  Takes a reference on current so the warning callback
 * can safely inspect the task later; no-op if the scenario's threshold
 * is zero.
 */
static void hybridswap_perf_init_monitor(
	struct hybridswap_key_point_record *record,
	enum hybridswap_scenario scenario)
{
	record->warning_threshold = hybridswap_perf_warning_threshold(scenario);
	if (!record->warning_threshold)
		return;
	record->task = current;
	get_task_struct(record->task);
	timer_setup(&record->lat_monitor, hybridswap_perf_warning, 0);
	mod_timer(&record->lat_monitor,
		jiffies + msecs_to_jiffies(record->warning_threshold));
}

/*
 * hybridswap_perf_stop_monitor - disarm the watchdog and drop the task
 * reference taken by hybridswap_perf_init_monitor().
 */
static void hybridswap_perf_stop_monitor(
	struct hybridswap_key_point_record *record)
{
	if (!record->warning_threshold)
		return;
	del_timer_sync(&record->lat_monitor);
	put_task_struct(record->task);
}

/*
 * hybridswap_perf_init - initialise a record: per-point spinlocks,
 * caller identity (nice, comm), scenario, and the watchdog.
 */
static void hybridswap_perf_init(struct hybridswap_key_point_record *record,
	enum hybridswap_scenario scenario)
{
	int i;

	for (i = 0; i < HYBRIDSWAP_KYE_POINT_BUTT; ++i)
		spin_lock_init(&record->key_point[i].time_lock);
	record->nice = task_nice(current);
	record->scenario = scenario;
	get_task_comm(record->task_comm, current);
	hybridswap_perf_init_monitor(record, scenario);
}
/*
 * hybridswap_perf_start_proc - mark entry into key point @type.
 * First hit stamps first_time; every hit bumps record_cnt and caches
 * the entry time / ravg sum for the matching end_proc call.
 */
void hybridswap_perf_start_proc(
	struct hybridswap_key_point_record *record,
	enum hybridswap_key_point type, ktime_t curr_time,
	unsigned long long current_ravg_sum)
{
	struct hybridswap_key_point_info *key_point =
		&record->key_point[type];

	if (!key_point->record_cnt)
		key_point->first_time = curr_time;
	key_point->record_cnt++;
	key_point->last_time = curr_time;
	key_point->last_ravg_sum = current_ravg_sum;
}

/*
 * hybridswap_perf_end_proc - mark exit from key point @type: fold the
 * elapsed time into totals/max and accumulate the ravg delta since the
 * matching start_proc.
 */
void hybridswap_perf_end_proc(
	struct hybridswap_key_point_record *record,
	enum hybridswap_key_point type, ktime_t curr_time,
	unsigned long long current_ravg_sum)
{
	struct hybridswap_key_point_info *key_point =
		&record->key_point[type];
	s64 diff_time = ktime_us_delta(curr_time, key_point->last_time);

	key_point->proc_total_time += diff_time;
	if (diff_time > key_point->proc_max_time)
		key_point->proc_max_time = diff_time;
	key_point->proc_ravg_sum += current_ravg_sum -
		key_point->last_ravg_sum;
	key_point->end_cnt++;
	key_point->last_time = curr_time;
	key_point->last_ravg_sum = current_ravg_sum;
}

/*
 * hybridswap_perf_async_perf - record a start+end pair for async
 * completion paths under the point's spinlock (these fire from other
 * contexts, unlike the synchronous helpers).  The ravg sum is only
 * sampled for CALL_BACK/END_WORK points.
 */
void hybridswap_perf_async_perf(
	struct hybridswap_key_point_record *record,
	enum hybridswap_key_point type, ktime_t start,
	unsigned long long start_ravg_sum)
{
	unsigned long long current_ravg_sum = ((type == HYBRIDSWAP_CALL_BACK) ||
		(type == HYBRIDSWAP_END_WORK)) ? hybridswap_get_ravg_sum() : 0;
	unsigned long flags;

	spin_lock_irqsave(&record->key_point[type].time_lock, flags);
	hybridswap_perf_start_proc(record, type, start, start_ravg_sum);
	hybridswap_perf_end_proc(record, type, ktime_get(),
		current_ravg_sum);
	spin_unlock_irqrestore(&record->key_point[type].time_lock, flags);
}

/*
 * hybridswap_perf_lat_point - record an instantaneous key point
 * (start and end in one shot, no duration).
 */
void hybridswap_perf_lat_point(
	struct hybridswap_key_point_record *record,
	enum hybridswap_key_point type)
{
	hybridswap_perf_start_proc(record, type, ktime_get(),
		hybridswap_get_ravg_sum());
	record->key_point[type].end_cnt++;
}

/*
 * hybridswap_perf_start - initialise the record and stamp the START
 * point.  (Parameter "stsrt" is a historical typo for "start".)
 */
void hybridswap_perf_start(
	struct hybridswap_key_point_record *record,
	ktime_t stsrt, unsigned long long start_ravg_sum,
	enum hybridswap_scenario scenario)
{
	hybridswap_perf_init(record, scenario);
	hybridswap_perf_start_proc(record, HYBRIDSWAP_START, stsrt,
		start_ravg_sum);
	record->key_point[HYBRIDSWAP_START].end_cnt++;
}
/*
 * hybridswap_perf_lat_stat - fold the completed operation's START→DONE
 * latency into the global per-scenario stats (total, max, timeout
 * count).  Fault-out timeouts are additionally bucketed (>100ms,
 * >500ms) and split by whether the current task is foreground.
 */
void hybridswap_perf_lat_stat(
	struct hybridswap_key_point_record *record)
{
	int task_is_fg = 0;
	struct hybridswap_stat *stat = hybridswap_get_stat_obj();
	s64 curr_lat;
	/* reclaim_in: 2s, fault_out: 100ms, batch_out: 500ms, pre_out: 2s */
	s64 timeout_value[HYBRIDSWAP_SCENARIO_BUTT] = {
		2000000, 100000, 500000, 2000000
	};

	if (!stat || (record->scenario >= HYBRIDSWAP_SCENARIO_BUTT))
		return;
	curr_lat = ktime_us_delta(record->key_point[HYBRIDSWAP_DONE].first_time,
		record->key_point[HYBRIDSWAP_START].first_time);
	atomic64_add(curr_lat, &stat->lat[record->scenario].total_lat);
	if (curr_lat > atomic64_read(&stat->lat[record->scenario].max_lat))
		atomic64_set(&stat->lat[record->scenario].max_lat, curr_lat);
	if (curr_lat > timeout_value[record->scenario])
		atomic64_inc(&stat->lat[record->scenario].timeout_cnt);
	if (record->scenario == HYBRIDSWAP_FAULT_OUT) {
		if (curr_lat <= timeout_value[HYBRIDSWAP_FAULT_OUT])
			return;
#ifdef CONFIG_FG_TASK_UID
		task_is_fg = current_is_fg() ? 1 : 0;
#endif
		if (curr_lat > 500000)
			atomic64_inc(&stat->fault_stat[task_is_fg].timeout_500ms_cnt);
		else if (curr_lat > 100000)
			atomic64_inc(&stat->fault_stat[task_is_fg].timeout_100ms_cnt);
		hybp(HYB_INFO, "task %s:%d fault out timeout us %llu fg %d\n",
			current->comm, current->pid, curr_lat, task_is_fg);
	}
}

/*
 * hybridswap_perf_end - finish a record: stop the watchdog, stamp DONE,
 * fold stats, and dump details when the record timed out or the
 * loglevel asks for debug output.
 */
void hybridswap_perf_end(struct hybridswap_key_point_record *record)
{
	int loglevel;

	hybridswap_perf_stop_monitor(record);
	hybridswap_perf_lat_point(record, HYBRIDSWAP_DONE);
	hybridswap_perf_lat_stat(record);
	loglevel = record->timeout_flag ? HYB_ERR : HYB_DEBUG;
	if (loglevel > hybridswap_loglevel())
		return;
	hybridswap_dump_lat(record,
		record->key_point[HYBRIDSWAP_DONE].first_time, true);
}

/* hybridswap_perf_lat_start - stamp entry into key point @type now. */
void hybridswap_perf_lat_start(
	struct hybridswap_key_point_record *record,
	enum hybridswap_key_point type)
{
	hybridswap_perf_start_proc(record, type, ktime_get(),
		hybridswap_get_ravg_sum());
}

/* hybridswap_perf_lat_end - stamp exit from key point @type now. */
void hybridswap_perf_lat_end(
	struct hybridswap_key_point_record *record,
	enum hybridswap_key_point type)
{
	hybridswap_perf_end_proc(record, type, ktime_get(),
		hybridswap_get_ravg_sum());
}

/* hybridswap_perf_io_stat - attach page/segment counts to the record. */
void hybridswap_perf_io_stat(
	struct hybridswap_key_point_record *record, int page_cnt,
	int segment_cnt)
{
	record->page_cnt = page_cnt;
	record->segment_cnt = segment_cnt;
}

View File

@@ -1,724 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/bio.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sched/task.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/zsmalloc.h>
#include <linux/delay.h>
#include "hybridswap_internal.h"
/* default key index is zero */
#define HYBRIDSWAP_KEY_INDEX 0
#define HYBRIDSWAP_KEY_SIZE 64
#define HYBRIDSWAP_KEY_INDEX_SHIFT 3
/* cap on extent pages in flight before writers throttle */
#define HYBRIDSWAP_MAX_INFILGHT_NUM 256
#define HYBRIDSWAP_SECTOR_SHIFT 9
#define HYBRIDSWAP_PAGE_SIZE_SECTOR (PAGE_SIZE >> HYBRIDSWAP_SECTOR_SHIFT)
/* poll intervals (ms) while waiting for I/O completion */
#define HYBRIDSWAP_READ_TIME 10
#define HYBRIDSWAP_WRITE_TIME 100
#define HYBRIDSWAP_FAULT_OUT_TIME 10
/* submit/complete timestamps for one segment's bio */
struct hybridswap_segment_time {
	ktime_t submit_bio;
	ktime_t end_io;
};
/*
 * One contiguous batch of extents submitted as a single bio.
 * io_entries_fifo mirrors io_entries in submission order so completion
 * can process entries by index.
 */
struct hybridswap_segment {
	sector_t segment_sector;
	int extent_cnt;
	int page_cnt;
	struct list_head io_entries;
	struct hybridswap_entry *io_entries_fifo[BIO_MAX_PAGES];
	struct work_struct endio_work;
	struct hybridswap_io_req *req;
	struct hybridswap_segment_time time;
	u32 bio_result;
};
/* random per-boot inline-crypto key material; see hybridswap_key_init() */
static u8 hybridswap_io_key[HYBRIDSWAP_KEY_SIZE];
#ifdef CONFIG_SCSI_UFS_ENHANCED_INLINE_CRYPTO_V3
static u8 hybridswap_io_metadata[METADATA_BYTE_IN_KDF];
#endif
/* dedicated read/write completion workqueues */
static struct workqueue_struct *hybridswap_proc_read_workqueue;
static struct workqueue_struct *hybridswap_proc_write_workqueue;
bool hybridswap_schedule_init_flag;
/*
 * hybridswap_stat_io_bytes - account a finished request's bytes into
 * the global stats: reclaim-in (write) vs batch-out (read) buckets.
 */
static void hybridswap_stat_io_bytes(struct hybridswap_io_req *req)
{
	struct hybridswap_stat *stat = hybridswap_get_stat_obj();

	if (!stat || !req->page_cnt)
		return;

	if (req->io_para.scenario == HYBRIDSWAP_RECLAIM_IN) {
		atomic64_add(req->page_cnt * PAGE_SIZE, &stat->reclaimin_bytes);
		atomic64_add(req->page_cnt * PAGE_SIZE, &stat->reclaimin_bytes_daily);
		atomic64_add(atomic64_read(&req->real_load), &stat->reclaimin_real_load);
		atomic64_inc(&stat->reclaimin_cnt);
	} else {
		atomic64_add(req->page_cnt * PAGE_SIZE, &stat->batchout_bytes);
		atomic64_inc(&stat->batchout_cnt);
	}
}

/* hybridswap_key_init - generate fresh random inline-crypto key material. */
static void hybridswap_key_init(void)
{
	get_random_bytes(hybridswap_io_key, HYBRIDSWAP_KEY_SIZE);
#ifdef CONFIG_SCSI_UFS_ENHANCED_INLINE_CRYPTO_V3
	get_random_bytes(hybridswap_io_metadata, METADATA_BYTE_IN_KDF);
#endif
}

/*
 * hybridswap_io_req_release - kref release: fire the requester's
 * completion callback (if any) and free the request.
 */
static void hybridswap_io_req_release(struct kref *ref)
{
	struct hybridswap_io_req *req =
		container_of(ref, struct hybridswap_io_req, refcount);

	if (req->io_para.complete_notify && req->io_para.private)
		req->io_para.complete_notify(req->io_para.private);
	kfree(req);
}

/*
 * hybridswap_segment_free - error path: complete every queued entry
 * with -EIO via the request's done callback, then free the segment.
 */
static void hybridswap_segment_free(struct hybridswap_io_req *req,
	struct hybridswap_segment *segment)
{
	int i;

	for (i = 0; i < segment->extent_cnt; ++i) {
		INIT_LIST_HEAD(&segment->io_entries_fifo[i]->list);
		req->io_para.done_callback(segment->io_entries_fifo[i], -EIO, req);
	}
	kfree(segment);
}

/*
 * hybridswap_limit_inflight - throttle the submitter while more than
 * HYBRIDSWAP_MAX_INFILGHT_NUM extent pages are in flight; polls in
 * 100ms slices until completions drain below the cap.
 */
static void hybridswap_limit_inflight(struct hybridswap_io_req *req)
{
	int ret;

	if (!req->limit_inflight_flag)
		return;

	if (atomic_read(&req->extent_inflight) >= HYBRIDSWAP_MAX_INFILGHT_NUM) {
		do {
			hybp(HYB_DEBUG, "wait inflight start\n");
			ret = wait_event_timeout(req->io_wait,
				atomic_read(&req->extent_inflight) <
				HYBRIDSWAP_MAX_INFILGHT_NUM,
				msecs_to_jiffies(100));
		} while (!ret);
	}
}
static void hybridswap_wait_io_finish(struct hybridswap_io_req *req)
{
int ret;
unsigned int wait_time;
if (!req->wait_io_finish_flag || !req->page_cnt)
return;
if (req->io_para.scenario == HYBRIDSWAP_FAULT_OUT) {
hybp(HYB_DEBUG, "fault out wait finish start\n");
wait_for_completion_io_timeout(&req->io_end_flag,
MAX_SCHEDULE_TIMEOUT);
return;
}
wait_time = (req->io_para.scenario == HYBRIDSWAP_RECLAIM_IN) ?
HYBRIDSWAP_WRITE_TIME : HYBRIDSWAP_READ_TIME;
do {
hybp(HYB_DEBUG, "wait finish start\n");
ret = wait_event_timeout(req->io_wait,
(!atomic_read(&req->extent_inflight)),
msecs_to_jiffies(wait_time));
} while (!ret);
}
/*
 * Account a segment as inflight: take a reference on the owning
 * request (under refmutex, paired with kref_put_mutex on completion)
 * and add the segment's page count to the inflight counter.
 */
static void hybridswap_inflight_inc(struct hybridswap_segment *segment)
{
	mutex_lock(&segment->req->refmutex);
	kref_get(&segment->req->refcount);
	mutex_unlock(&segment->req->refmutex);
	atomic_add(segment->page_cnt, &segment->req->extent_inflight);
}
/*
 * Retire @num inflight pages. If the count drops below the throttle
 * limit and submitters are throttled on io_wait (limit_inflight_flag),
 * wake them up so hybridswap_limit_inflight() can proceed.
 */
static void hybridswap_inflight_dec(struct hybridswap_io_req *req,
	int num)
{
	if ((atomic_sub_return(num, &req->extent_inflight) <
		HYBRIDSWAP_MAX_INFILGHT_NUM) && req->limit_inflight_flag &&
		wq_has_sleeper(&req->io_wait))
		wake_up(&req->io_wait);
}
/*
 * Signal request completion to the waiter in
 * hybridswap_wait_io_finish(): fault-out waits on the completion,
 * every other scenario sleeps on io_wait.
 */
static void hybridswap_io_end_wake_up(struct hybridswap_io_req *req)
{
	if (req->io_para.scenario == HYBRIDSWAP_FAULT_OUT) {
		complete(&req->io_end_flag);
		return;
	}

	if (wq_has_sleeper(&req->io_wait))
		wake_up(&req->io_wait);
}
/*
 * Successful-completion processing for every extent in @segment:
 * detach each entry, report success (err = 0) through done_callback
 * with per-callback latency accounting, and retire the entry's pages
 * from the inflight counter (which may wake throttled submitters).
 */
static void hybridswap_io_entry_proc(struct hybridswap_segment *segment)
{
	int i;
	struct hybridswap_io_req *req = segment->req;
	struct hybridswap_key_point_record *record = req->io_para.record;
	int page_num;
	ktime_t callback_start;
	unsigned long long callback_start_ravg_sum;

	for (i = 0; i < segment->extent_cnt; ++i) {
		INIT_LIST_HEAD(&segment->io_entries_fifo[i]->list);
		page_num = segment->io_entries_fifo[i]->pages_sz;
		hybp(HYB_DEBUG, "extent_id[%d] %d page_num %d\n",
			i, segment->io_entries_fifo[i]->ext_id, page_num);
		callback_start = ktime_get();
		callback_start_ravg_sum = hybridswap_get_ravg_sum();
		if (req->io_para.done_callback)
			req->io_para.done_callback(segment->io_entries_fifo[i],
				0, req);
		hybridswap_perf_async_perf(record, HYBRIDSWAP_CALL_BACK,
			callback_start, callback_start_ravg_sum);
		hybridswap_inflight_dec(req, page_num);
	}
}
/*
 * Record a failure point for diagnostics, but only for the fault-out
 * scenario (the latency-critical page-fault path).
 */
static void hybridswap_io_err_record(enum hybridswap_fail_point point,
	struct hybridswap_io_req *req, int ext_id)
{
	if (req->io_para.scenario == HYBRIDSWAP_FAULT_OUT)
		hybridswap_fail_record(point, 0, ext_id,
			req->io_para.record->task_comm);
}
/* Bump the per-scenario I/O failure counter, when stats are available
 * and the scenario index is in range.
 */
static void hybridswap_stat_io_fail(enum hybridswap_scenario scenario)
{
	struct hybridswap_stat *stat = hybridswap_get_stat_obj();

	if (!stat)
		return;
	if (scenario >= HYBRIDSWAP_SCENARIO_BUTT)
		return;

	atomic64_inc(&stat->io_fail_cnt[scenario]);
}
/*
 * Error-completion path for a segment whose bio failed: log, update
 * failure stats and (for fault-out) the fail record, retire the
 * segment's inflight pages, wake the request waiter, fail + free all
 * queued extents, and drop the reference taken at submit time.
 */
static void hybridswap_io_err_proc(struct hybridswap_io_req *req,
	struct hybridswap_segment *segment)
{
	hybp(HYB_ERR, "segment sector 0x%llx, extent_cnt %d\n",
		segment->segment_sector, segment->extent_cnt);
	hybp(HYB_ERR, "scenario %u, bio_result %u\n",
		req->io_para.scenario, segment->bio_result);
	hybridswap_stat_io_fail(req->io_para.scenario);
	hybridswap_io_err_record(HYBRIDSWAP_FAULT_OUT_IO_FAIL, req,
		segment->io_entries_fifo[0]->ext_id);
	hybridswap_inflight_dec(req, segment->page_cnt);
	hybridswap_io_end_wake_up(req);
	hybridswap_segment_free(req, segment);
	kref_put_mutex(&req->refcount, hybridswap_io_req_release,
		&req->refmutex);
}
/*
 * Workqueue handler that finishes a completed segment outside bio
 * end_io context. On bio error it delegates to the error path
 * (which frees the segment). On success it temporarily inherits the
 * submitter's nice value, runs the per-extent success callbacks,
 * wakes the request waiter, drops the submit-time reference, frees
 * the segment and restores the worker's original nice value.
 */
static void hybridswap_io_end_work(struct work_struct *work)
{
	struct hybridswap_segment *segment =
		container_of(work, struct hybridswap_segment, endio_work);
	struct hybridswap_io_req *req = segment->req;
	struct hybridswap_key_point_record *record = req->io_para.record;
	int old_nice = task_nice(current);
	ktime_t work_start;
	unsigned long long work_start_ravg_sum;

	if (unlikely(segment->bio_result)) {
		hybridswap_io_err_proc(req, segment);
		return;
	}

	hybp(HYB_DEBUG, "segment sector 0x%llx, extent_cnt %d passed\n",
		segment->segment_sector, segment->extent_cnt);
	hybp(HYB_DEBUG, "scenario %u, bio_result %u passed\n",
		req->io_para.scenario, segment->bio_result);
	/* run the callbacks at the submitter's priority, not the worker's */
	set_user_nice(current, req->nice);
	hybridswap_perf_async_perf(record, HYBRIDSWAP_SCHED_WORK,
		segment->time.end_io, 0);
	work_start = ktime_get();
	work_start_ravg_sum = hybridswap_get_ravg_sum();
	hybridswap_io_entry_proc(segment);
	hybridswap_perf_async_perf(record, HYBRIDSWAP_END_WORK, work_start,
		work_start_ravg_sum);
	hybridswap_io_end_wake_up(req);
	kref_put_mutex(&req->refcount, hybridswap_io_req_release,
		&req->refmutex);
	kfree(segment);
	set_user_nice(current, old_nice);
}
/*
 * bio completion handler. Records end-io latency, stamps the segment
 * with the completion time and bio status, and defers the real work
 * to the scenario-appropriate workqueue (write wq for reclaim-in,
 * read wq otherwise). Always drops the bio reference.
 */
static void hybridswap_end_io(struct bio *bio)
{
	struct hybridswap_segment *segment = bio->bi_private;
	struct hybridswap_io_req *req = NULL;
	struct workqueue_struct *workqueue = NULL;
	struct hybridswap_key_point_record *record = NULL;

	if (unlikely(!segment || !(segment->req))) {
		hybp(HYB_ERR, "segment or req null\n");
		bio_put(bio);
		return;
	}

	req = segment->req;
	record = req->io_para.record;
	hybridswap_perf_async_perf(record, HYBRIDSWAP_END_IO,
		segment->time.submit_bio, 0);
	workqueue = (req->io_para.scenario == HYBRIDSWAP_RECLAIM_IN) ?
		hybridswap_proc_write_workqueue : hybridswap_proc_read_workqueue;
	segment->time.end_io = ktime_get();
	segment->bio_result = bio->bi_status;
	queue_work(workqueue, &segment->endio_work);
	bio_put(bio);
}
/* True when @io_entry starts exactly where the segment's last extent
 * ends, i.e. it can be appended at the back of the segment.
 */
static bool hybridswap_ext_merge_back(
	struct hybridswap_segment *segment,
	struct hybridswap_entry *io_entry)
{
	struct hybridswap_entry *tail =
		list_last_entry(&segment->io_entries,
			struct hybridswap_entry, list);

	return io_entry->addr ==
		(tail->addr + tail->pages_sz * HYBRIDSWAP_PAGE_SIZE_SECTOR);
}
/* True when @io_entry ends exactly where the segment's first extent
 * starts, i.e. it can be prepended at the front of the segment.
 */
static bool hybridswap_ext_merge_front(
	struct hybridswap_segment *segment,
	struct hybridswap_entry *io_entry)
{
	struct hybridswap_entry *head =
		list_first_entry(&segment->io_entries,
			struct hybridswap_entry, list);

	return (io_entry->addr +
		io_entry->pages_sz * HYBRIDSWAP_PAGE_SIZE_SECTOR) == head->addr;
}
/*
 * Try to merge @io_entry into the request's currently open segment.
 * Merging requires disk-adjacency (front or back) and that the merged
 * segment still fits in one bio (<= BIO_MAX_PAGES pages). Returns
 * true if merged; false means the caller must flush and start a new
 * segment.
 */
static bool hybridswap_ext_merge(struct hybridswap_io_req *req,
	struct hybridswap_entry *io_entry)
{
	struct hybridswap_segment *segment = req->segment;

	if (segment == NULL)
		return false;
	if ((segment->page_cnt + io_entry->pages_sz) > BIO_MAX_PAGES)
		return false;

	if (hybridswap_ext_merge_front(segment, io_entry)) {
		list_add(&io_entry->list, &segment->io_entries);
		segment->io_entries_fifo[segment->extent_cnt++] = io_entry;
		/* the new entry is now the lowest sector of the segment */
		segment->segment_sector = io_entry->addr;
		segment->page_cnt += io_entry->pages_sz;
		return true;
	}

	if (hybridswap_ext_merge_back(segment, io_entry)) {
		list_add_tail(&io_entry->list, &segment->io_entries);
		segment->io_entries_fifo[segment->extent_cnt++] = io_entry;
		segment->page_cnt += io_entry->pages_sz;
		return true;
	}

	return false;
}
static struct bio *hybridswap_bio_alloc(enum hybridswap_scenario scenario)
{
gfp_t gfp = (scenario != HYBRIDSWAP_RECLAIM_IN) ? GFP_ATOMIC : GFP_NOIO;
struct bio *bio = bio_alloc(gfp, BIO_MAX_PAGES);
if (!bio && (scenario == HYBRIDSWAP_FAULT_OUT))
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
return bio;
}
/*
 * Attach every destination page of every extent in @segment to @bio,
 * in FIFO order. Each page's index is stamped with its target sector
 * (bio start sector plus running sector offset). Returns 0 on
 * success, -EIO if the bio runs out of room.
 */
static int hybridswap_bio_add_page(struct bio *bio,
	struct hybridswap_segment *segment)
{
	int i;
	int k = 0;
	struct hybridswap_entry *io_entry = NULL;
	struct hybridswap_entry *tmp = NULL;

	list_for_each_entry_safe(io_entry, tmp, &segment->io_entries, list) {
		for (i = 0; i < io_entry->pages_sz; i++) {
			io_entry->dest_pages[i]->index =
				bio->bi_iter.bi_sector + k;
			if (unlikely(!bio_add_page(bio,
				io_entry->dest_pages[i], PAGE_SIZE, 0))) {
				return -EIO;
			}
			k += HYBRIDSWAP_PAGE_SIZE_SECTOR;
		}
	}
	return 0;
}
static void hybridswap_set_bio_opf(struct bio *bio,
struct hybridswap_segment *segment)
{
if (segment->req->io_para.scenario == HYBRIDSWAP_RECLAIM_IN) {
bio->bi_opf |= REQ_BACKGROUND;
return;
}
bio->bi_opf |= REQ_SYNC;
}
/*
 * Build and submit one bio for @segment: allocate the bio, point it at
 * the segment's start sector on the backing device, add all extent
 * pages, mark the segment inflight (takes a req reference), update the
 * request's page/segment accounting and submit. Latency key points are
 * recorded around allocation and submission.
 *
 * Returns 0 on success, -ENOMEM/-EIO on allocation / bio-fill failure
 * (the caller is expected to free the segment, see
 * hybridswap_io_submit()).
 */
int hybridswap_submit_bio(struct hybridswap_segment *segment)
{
	unsigned int op =
		(segment->req->io_para.scenario == HYBRIDSWAP_RECLAIM_IN) ?
		REQ_OP_WRITE : REQ_OP_READ;
	struct hybridswap_entry *head_io_entry =
		list_first_entry(&segment->io_entries,
			struct hybridswap_entry, list);
	struct hybridswap_key_point_record *record =
		segment->req->io_para.record;
	struct bio *bio = NULL;

	hybridswap_perf_lat_start(record, HYBRIDSWAP_BIO_ALLOC);
	bio = hybridswap_bio_alloc(segment->req->io_para.scenario);
	hybridswap_perf_lat_end(record, HYBRIDSWAP_BIO_ALLOC);
	if (unlikely(!bio)) {
		hybp(HYB_ERR, "bio is null.\n");
		hybridswap_io_err_record(HYBRIDSWAP_FAULT_OUT_BIO_ALLOC_FAIL,
			segment->req, segment->io_entries_fifo[0]->ext_id);
		return -ENOMEM;
	}

	bio->bi_iter.bi_sector = segment->segment_sector;
	bio_set_dev(bio, segment->req->io_para.bdev);
	bio->bi_private = segment;
	bio_set_op_attrs(bio, op, 0);
	bio->bi_end_io = hybridswap_end_io;
	hybridswap_set_bio_opf(bio, segment);

	if (unlikely(hybridswap_bio_add_page(bio, segment))) {
		bio_put(bio);
		hybp(HYB_ERR, "bio_add_page fail\n");
		hybridswap_io_err_record(HYBRIDSWAP_FAULT_OUT_BIO_ADD_FAIL,
			segment->req, segment->io_entries_fifo[0]->ext_id);
		return -EIO;
	}

	hybridswap_inflight_inc(segment);
	hybp(HYB_DEBUG, "submit bio sector %llu ext_id %d\n",
		segment->segment_sector, head_io_entry->ext_id);
	hybp(HYB_DEBUG, "extent_cnt %d scenario %u\n",
		segment->extent_cnt, segment->req->io_para.scenario);
	segment->req->page_cnt += segment->page_cnt;
	segment->req->segment_cnt++;
	segment->time.submit_bio = ktime_get();
	hybridswap_perf_lat_start(record, HYBRIDSWAP_SUBMIT_BIO);
	submit_bio(bio);
	hybridswap_perf_lat_end(record, HYBRIDSWAP_SUBMIT_BIO);
	return 0;
}
/*
 * Open a new segment for @req seeded with @io_entry. Reclaim-in may
 * block (GFP_NOIO); other scenarios allocate atomically, with the
 * fault-out path retrying with GFP_NOIO rather than failing. Returns
 * 0 on success, -ENOMEM on allocation failure (recorded for
 * fault-out).
 */
static int hybridswap_new_segment_init(struct hybridswap_io_req *req,
	struct hybridswap_entry *io_entry)
{
	gfp_t gfp = (req->io_para.scenario != HYBRIDSWAP_RECLAIM_IN) ?
		GFP_ATOMIC : GFP_NOIO;
	struct hybridswap_segment *segment = NULL;
	struct hybridswap_key_point_record *record = req->io_para.record;

	hybridswap_perf_lat_start(record, HYBRIDSWAP_SEGMENT_ALLOC);
	segment = kzalloc(sizeof(struct hybridswap_segment), gfp);
	if (!segment && (req->io_para.scenario == HYBRIDSWAP_FAULT_OUT))
		segment = kzalloc(sizeof(struct hybridswap_segment), GFP_NOIO);
	hybridswap_perf_lat_end(record, HYBRIDSWAP_SEGMENT_ALLOC);
	if (unlikely(!segment)) {
		hybridswap_io_err_record(HYBRIDSWAP_FAULT_OUT_SEGMENT_ALLOC_FAIL,
			req, io_entry->ext_id);
		return -ENOMEM;
	}

	segment->req = req;
	INIT_LIST_HEAD(&segment->io_entries);
	list_add_tail(&io_entry->list, &segment->io_entries);
	segment->io_entries_fifo[segment->extent_cnt++] = io_entry;
	segment->page_cnt = io_entry->pages_sz;
	INIT_WORK(&segment->endio_work, hybridswap_io_end_work);
	segment->segment_sector = io_entry->addr;
	req->segment = segment;
	return 0;
}
/*
 * Flush the request's open segment to disk. With @merge_flag set the
 * flush only happens once the segment is full (BIO_MAX_PAGES);
 * otherwise any open segment is flushed unconditionally. Applies
 * inflight throttling first. On submit failure the segment's extents
 * are failed and freed. The open-segment slot is cleared either way.
 */
static int hybridswap_io_submit(struct hybridswap_io_req *req,
	bool merge_flag)
{
	int ret;
	struct hybridswap_segment *segment = req->segment;

	if (!segment || ((merge_flag) && (segment->page_cnt < BIO_MAX_PAGES)))
		return 0;

	hybridswap_limit_inflight(req);
	ret = hybridswap_submit_bio(segment);
	if (unlikely(ret)) {
		hybp(HYB_WARN, "submit bio failed, ret %d\n", ret);
		hybridswap_segment_free(req, segment);
	}
	req->segment = NULL;
	return ret;
}
/*
 * Validate the I/O parameter block passed to hybridswap_plug_start():
 * non-NULL, a backing device, an in-range scenario and a completion
 * callback are all required. Returns true when the parameters are
 * invalid.
 */
static bool hybridswap_check_io_para_err(struct hybridswap_io *io_para)
{
	if (unlikely(!io_para)) {
		hybp(HYB_ERR, "io_para null\n");
		return true;
	}

	if (unlikely(!io_para->bdev ||
		(io_para->scenario >= HYBRIDSWAP_SCENARIO_BUTT))) {
		hybp(HYB_ERR, "io_para err, scenario %u\n",
			io_para->scenario);
		return true;
	}

	if (unlikely(!io_para->done_callback)) {
		hybp(HYB_ERR, "done_callback err\n");
		return true;
	}

	return false;
}
/*
 * Validate one extent descriptor: non-NULL, a destination page array,
 * a non-negative extent id, a page count in (0, BIO_MAX_PAGES], and
 * no NULL destination page. Returns true when the entry is invalid.
 */
static bool hybridswap_check_entry_err(
	struct hybridswap_entry *io_entry)
{
	int i;

	if (unlikely(!io_entry)) {
		hybp(HYB_ERR, "io_entry null\n");
		return true;
	}

	if (unlikely((!io_entry->dest_pages) ||
		(io_entry->ext_id < 0) ||
		(io_entry->pages_sz > BIO_MAX_PAGES) ||
		(io_entry->pages_sz <= 0))) {
		hybp(HYB_ERR, "ext_id %d, page_sz %d\n", io_entry->ext_id,
			io_entry->pages_sz);
		return true;
	}

	for (i = 0; i < io_entry->pages_sz; ++i) {
		if (!io_entry->dest_pages[i]) {
			hybp(HYB_ERR, "dest_pages[%d] is null\n", i);
			return true;
		}
	}

	return false;
}
/*
 * Queue one extent on the request: validate it, try to merge it into
 * the open segment (flushing only when the segment becomes full),
 * otherwise flush the open segment and start a new one seeded with
 * this extent. Any failure is reported to the extent's owner through
 * done_callback with -EIO.
 *
 * NOTE(review): on validation failure done_callback may be invoked
 * with a NULL io_entry — assumes the callback tolerates that; confirm
 * against the callback implementations.
 */
static int hybridswap_io_extent(void *io_handler,
	struct hybridswap_entry *io_entry)
{
	int ret;
	struct hybridswap_io_req *req = (struct hybridswap_io_req *)io_handler;

	if (unlikely(hybridswap_check_entry_err(io_entry))) {
		hybridswap_io_err_record(HYBRIDSWAP_FAULT_OUT_IO_ENTRY_PARA_FAIL,
			req, io_entry ? io_entry->ext_id : -EINVAL);
		req->io_para.done_callback(io_entry, -EIO, req);
		return -EFAULT;
	}

	hybp(HYB_DEBUG, "ext id %d, pages_sz %d, addr %llx\n",
		io_entry->ext_id, io_entry->pages_sz,
		io_entry->addr);

	if (hybridswap_ext_merge(req, io_entry))
		return hybridswap_io_submit(req, true);

	ret = hybridswap_io_submit(req, false);
	if (unlikely(ret)) {
		hybp(HYB_ERR, "submit fail %d\n", ret);
		req->io_para.done_callback(io_entry, -EIO, req);
		return ret;
	}

	ret = hybridswap_new_segment_init(req, io_entry);
	if (unlikely(ret)) {
		hybp(HYB_ERR, "hybridswap_new_segment_init fail %d\n", ret);
		req->io_para.done_callback(io_entry, -EIO, req);
		return ret;
	}

	return 0;
}
int hybridswap_schedule_init(void)
{
if (hybridswap_schedule_init_flag)
return 0;
hybridswap_proc_read_workqueue = alloc_workqueue("proc_hybridswap_read",
WQ_HIGHPRI | WQ_UNBOUND, 0);
if (unlikely(!hybridswap_proc_read_workqueue))
return -EFAULT;
hybridswap_proc_write_workqueue = alloc_workqueue("proc_hybridswap_write",
WQ_CPU_INTENSIVE, 0);
if (unlikely(!hybridswap_proc_write_workqueue)) {
destroy_workqueue(hybridswap_proc_read_workqueue);
return -EFAULT;
}
hybridswap_key_init();
hybridswap_schedule_init_flag = true;
return 0;
}
/*
 * Begin a batched I/O request. Validates @io_para, allocates and
 * initializes a hybridswap_io_req (reclaim-in may block with GFP_NOIO;
 * other scenarios try GFP_ATOMIC, with fault-out falling back to a
 * blocking retry), and configures scenario-dependent behavior:
 * inflight throttling for reclaim-in/pre-out, completion waiting for
 * reclaim-in/fault-out.
 *
 * Returns an opaque handle for hybridswap_read_extent() /
 * hybridswap_write_extent() / hybridswap_plug_finish(), or NULL on
 * bad parameters or allocation failure.
 */
void *hybridswap_plug_start(struct hybridswap_io *io_para)
{
	gfp_t gfp;
	struct hybridswap_io_req *req = NULL;

	if (unlikely(hybridswap_check_io_para_err(io_para)))
		return NULL;

	gfp = (io_para->scenario != HYBRIDSWAP_RECLAIM_IN) ?
		GFP_ATOMIC : GFP_NOIO;
	req = kzalloc(sizeof(struct hybridswap_io_req), gfp);
	if (!req && (io_para->scenario == HYBRIDSWAP_FAULT_OUT))
		req = kzalloc(sizeof(struct hybridswap_io_req), GFP_NOIO);
	if (unlikely(!req)) {
		hybp(HYB_ERR, "io_req null\n");
		return NULL;
	}

	kref_init(&req->refcount);
	mutex_init(&req->refmutex);
	atomic_set(&req->extent_inflight, 0);
	init_waitqueue_head(&req->io_wait);
	req->io_para.bdev = io_para->bdev;
	req->io_para.scenario = io_para->scenario;
	req->io_para.done_callback = io_para->done_callback;
	req->io_para.complete_notify = io_para->complete_notify;
	req->io_para.private = io_para->private;
	req->io_para.record = io_para->record;
	req->limit_inflight_flag =
		(io_para->scenario == HYBRIDSWAP_RECLAIM_IN) ||
		(io_para->scenario == HYBRIDSWAP_PRE_OUT);
	req->wait_io_finish_flag =
		(io_para->scenario == HYBRIDSWAP_RECLAIM_IN) ||
		(io_para->scenario == HYBRIDSWAP_FAULT_OUT);
	/* remembered so end_io work can run callbacks at this priority */
	req->nice = task_nice(current);
	init_completion(&req->io_end_flag);
	return (void *)req;
}
/*
 * Queue one extent for reading. Thin wrapper around
 * hybridswap_io_extent(); the direction comes from the scenario set
 * at hybridswap_plug_start() time.
 * io_handler validity guaranteed by the caller.
 */
int hybridswap_read_extent(void *io_handler,
	struct hybridswap_entry *io_entry)
{
	return hybridswap_io_extent(io_handler, io_entry);
}
/*
 * Queue one extent for writing. Thin wrapper around
 * hybridswap_io_extent(); the direction comes from the scenario set
 * at hybridswap_plug_start() time.
 * io_handler validity guaranteed by the caller.
 */
int hybridswap_write_extent(void *io_handler,
	struct hybridswap_entry *io_entry)
{
	return hybridswap_io_extent(io_handler, io_entry);
}
/*
 * Finish a batched request: flush any partially-filled segment, wait
 * for completion (scenario-dependent, see
 * hybridswap_wait_io_finish()), account I/O byte stats and perf key
 * points, and drop the initial reference — the request is freed here
 * unless inflight work still holds references.
 * io_handler validity guaranteed by the caller.
 */
int hybridswap_plug_finish(void *io_handler)
{
	int ret;
	struct hybridswap_io_req *req = (struct hybridswap_io_req *)io_handler;

	hybridswap_perf_lat_start(req->io_para.record, HYBRIDSWAP_IO_EXTENT);
	ret = hybridswap_io_submit(req, false);
	if (unlikely(ret))
		hybp(HYB_ERR, "submit fail %d\n", ret);
	hybridswap_perf_lat_end(req->io_para.record, HYBRIDSWAP_IO_EXTENT);

	hybridswap_wait_io_finish(req);
	hybridswap_perf_lat_point(req->io_para.record, HYBRIDSWAP_WAKE_UP);
	hybridswap_stat_io_bytes(req);
	hybridswap_perf_io_stat(req->io_para.record, req->page_cnt,
		req->segment_cnt);
	kref_put_mutex(&req->refcount, hybridswap_io_req_release,
		&req->refmutex);
	hybp(HYB_DEBUG, "io schedule finish succ\n");
	return ret;
}

View File

@@ -1,482 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/zsmalloc.h>
#include <linux/memcontrol.h>
#include <linux/proc_fs.h>
#include "../zram_drv.h"
#include "../zram_drv_internal.h"
#include "hybridswap_internal.h"
#include "hybridswap.h"
#define SCENARIO_NAME_LEN 32
#define MBYTE_SHIFT 20
static char scenario_name[HYBRIDSWAP_SCENARIO_BUTT][SCENARIO_NAME_LEN] = {
"reclaim_in",
"fault_out",
"batch_out",
"pre_out"
};
static char *fg_bg[2] = {"BG", "FG"};
/*
 * Emit per-scenario latency stats (total/max/timeout counts) and the
 * FG/BG fault-out timeout buckets (>100ms, >500ms) to @m.
 */
static void hybridswap_lat_show(struct seq_file *m,
	struct hybridswap_stat *stat)
{
	int i;

	for (i = 0; i < HYBRIDSWAP_SCENARIO_BUTT; ++i) {
		seq_printf(m, "hybridswap_%s_total_lat: %lld\n",
			scenario_name[i],
			atomic64_read(&stat->lat[i].total_lat));
		seq_printf(m, "hybridswap_%s_max_lat: %lld\n",
			scenario_name[i],
			atomic64_read(&stat->lat[i].max_lat));
		seq_printf(m, "hybridswap_%s_timeout_cnt: %lld\n",
			scenario_name[i],
			atomic64_read(&stat->lat[i].timeout_cnt));
	}

	for (i = 0; i < 2; i++) {
		seq_printf(m, "fault_out_timeout_100ms_cnt(%s): %lld\n",
			fg_bg[i],
			atomic64_read(&stat->fault_stat[i].timeout_100ms_cnt));
		seq_printf(m, "fault_out_timeout_500ms_cnt(%s): %lld\n",
			fg_bg[i],
			atomic64_read(&stat->fault_stat[i].timeout_500ms_cnt));
	}
}
/*
 * Emit swap-out/swap-in counters and fault counters to @m, with byte
 * counters converted to MB. The PAGE_SHIFT guard only skips the
 * page-based lines on (theoretical) configs where a page is >= 1MB.
 */
static void hybridswap_stats_show(struct seq_file *m,
	struct hybridswap_stat *stat)
{
	seq_printf(m, "hybridswap_out_times: %lld\n",
		atomic64_read(&stat->reclaimin_cnt));
	seq_printf(m, "hybridswap_out_comp_size: %lld MB\n",
		atomic64_read(&stat->reclaimin_bytes) >> MBYTE_SHIFT);
	if (PAGE_SHIFT < MBYTE_SHIFT)
		seq_printf(m, "hybridswap_out_ori_size: %lld MB\n",
			atomic64_read(&stat->reclaimin_pages) >>
			(MBYTE_SHIFT - PAGE_SHIFT));
	seq_printf(m, "hybridswap_in_times: %lld\n",
		atomic64_read(&stat->batchout_cnt));
	seq_printf(m, "hybridswap_in_comp_size: %lld MB\n",
		atomic64_read(&stat->batchout_bytes) >> MBYTE_SHIFT);
	if (PAGE_SHIFT < MBYTE_SHIFT)
		seq_printf(m, "hybridswap_in_ori_size: %lld MB\n",
			atomic64_read(&stat->batchout_pages) >>
			(MBYTE_SHIFT - PAGE_SHIFT));
	seq_printf(m, "hybridswap_all_fault: %lld\n",
		atomic64_read(&stat->fault_cnt));
	seq_printf(m, "hybridswap_fault: %lld\n",
		atomic64_read(&stat->hybridswap_fault_cnt));
}
/*
 * Emit backing-area occupancy stats (re-out sizes, stored sizes,
 * freed-extent size, memcg/extent/fragment counts) to @m in MB where
 * applicable.
 */
static void hybridswap_area_info_show(struct seq_file *m,
	struct hybridswap_stat *stat)
{
	seq_printf(m, "hybridswap_reout_ori_size: %lld MB\n",
		atomic64_read(&stat->reout_pages) >>
		(MBYTE_SHIFT - PAGE_SHIFT));
	seq_printf(m, "hybridswap_reout_comp_size: %lld MB\n",
		atomic64_read(&stat->reout_bytes) >> MBYTE_SHIFT);
	seq_printf(m, "hybridswap_store_comp_size: %lld MB\n",
		atomic64_read(&stat->stored_size) >> MBYTE_SHIFT);
	seq_printf(m, "hybridswap_store_ori_size: %lld MB\n",
		atomic64_read(&stat->stored_pages) >>
		(MBYTE_SHIFT - PAGE_SHIFT));
	seq_printf(m, "hybridswap_notify_free_size: %lld MB\n",
		atomic64_read(&stat->notify_free) >>
		(MBYTE_SHIFT - EXTENT_SHIFT));
	seq_printf(m, "hybridswap_store_memcg_cnt: %lld\n",
		atomic64_read(&stat->mcg_cnt));
	seq_printf(m, "hybridswap_store_extent_cnt: %lld\n",
		atomic64_read(&stat->ext_cnt));
	seq_printf(m, "hybridswap_store_fragment_cnt: %lld\n",
		atomic64_read(&stat->frag_cnt));
}
/*
 * Emit per-scenario I/O failure and allocation-failure counters to @m.
 */
static void hybridswap_fail_show(struct seq_file *m,
	struct hybridswap_stat *stat)
{
	int i;

	for (i = 0; i < HYBRIDSWAP_SCENARIO_BUTT; ++i) {
		seq_printf(m, "hybridswap_%s_io_fail_cnt: %lld\n",
			scenario_name[i],
			atomic64_read(&stat->io_fail_cnt[i]));
		seq_printf(m, "hybridswap_%s_alloc_fail_cnt: %lld\n",
			scenario_name[i],
			atomic64_read(&stat->alloc_fail_cnt[i]));
	}
}
/*
 * seq_file show handler aggregating all hybridswap statistics (swap
 * counters, area info, latency, failures). Returns -EINVAL when the
 * core is disabled or stats are unavailable.
 */
int hybridswap_psi_show(struct seq_file *m, void *v)
{
	struct hybridswap_stat *stat = NULL;

	if (!hybridswap_core_enabled())
		return -EINVAL;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_ERR, "can't get stat obj!\n");
		return -EINVAL;
	}

	hybridswap_stats_show(m, stat);
	hybridswap_area_info_show(m, stat);
	hybridswap_lat_show(m, stat);
	hybridswap_fail_show(m, stat);
	return 0;
}
/*
 * Number of pages currently stored in zram, or 0 when hybridswap is
 * disabled or stats are unavailable.
 */
unsigned long hybridswap_get_zram_used_pages(void)
{
	struct hybridswap_stat *stat = NULL;

	if (!hybridswap_core_enabled())
		return 0;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_ERR, "can't get stat obj!\n");
		return 0;
	}

	return atomic64_read(&stat->zram_stored_pages);
}
/*
 * Total fault count recorded by hybridswap, or 0 when disabled or
 * stats are unavailable.
 */
unsigned long long hybridswap_get_zram_pagefault(void)
{
	struct hybridswap_stat *stat = NULL;

	if (!hybridswap_core_enabled())
		return 0;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_ERR, "can't get stat obj!\n");
		return 0;
	}

	return atomic64_read(&stat->fault_cnt);
}
/*
 * Whether a reclaim-in (swap-out) operation is currently inflight.
 * Returns false when hybridswap is disabled or stats are unavailable.
 */
bool hybridswap_reclaim_work_running(void)
{
	struct hybridswap_stat *stat = NULL;

	if (!hybridswap_core_enabled())
		return false;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_ERR, "can't get stat obj!\n");
		/* was "return 0;" — keep bool-typed returns consistent */
		return false;
	}

	return atomic64_read(&stat->reclaimin_infight) != 0;
}
/*
 * Read one per-memcg hybridswap statistic selected by @mcg_member.
 * Extent-count members are converted to bytes via EXTENT_SHIFT and
 * clamped at zero first. Returns 0 when hybridswap is disabled, the
 * memcg has no hybridswap data, or the member is unknown.
 */
unsigned long long hybridswap_read_mcg_stats(struct mem_cgroup *mcg,
	enum hybridswap_mcg_member mcg_member)
{
	struct mem_cgroup_hybridswap *mcg_hybs;
	unsigned long long val = 0;
	int extcnt;

	if (!hybridswap_core_enabled())
		return 0;

	mcg_hybs = MEMCGRP_ITEM_DATA(mcg);
	if (!mcg_hybs) {
		hybp(HYB_DEBUG, "NULL mcg_hybs\n");
		return 0;
	}

	switch (mcg_member) {
	case MCG_ZRAM_STORED_SZ:
		val = atomic64_read(&mcg_hybs->zram_stored_size);
		break;
	case MCG_ZRAM_STORED_PG_SZ:
		val = atomic64_read(&mcg_hybs->zram_page_size);
		break;
	case MCG_DISK_STORED_SZ:
		val = atomic64_read(&mcg_hybs->hybridswap_stored_size);
		break;
	case MCG_DISK_STORED_PG_SZ:
		val = atomic64_read(&mcg_hybs->hybridswap_stored_pages);
		break;
	case MCG_ANON_FAULT_CNT:
		val = atomic64_read(&mcg_hybs->hybridswap_allfaultcnt);
		break;
	case MCG_DISK_FAULT_CNT:
		val = atomic64_read(&mcg_hybs->hybridswap_faultcnt);
		break;
	case MCG_ESWAPOUT_CNT:
		val = atomic64_read(&mcg_hybs->hybridswap_outcnt);
		break;
	case MCG_ESWAPOUT_SZ:
		val = atomic64_read(&mcg_hybs->hybridswap_outextcnt) << EXTENT_SHIFT;
		break;
	case MCG_ESWAPIN_CNT:
		val = atomic64_read(&mcg_hybs->hybridswap_incnt);
		break;
	case MCG_ESWAPIN_SZ:
		val = atomic64_read(&mcg_hybs->hybridswap_inextcnt) << EXTENT_SHIFT;
		break;
	case MCG_DISK_SPACE:
		extcnt = atomic_read(&mcg_hybs->hybridswap_extcnt);
		if (extcnt < 0)
			extcnt = 0;
		val = ((unsigned long long) extcnt) << EXTENT_SHIFT;
		break;
	case MCG_DISK_SPACE_PEAK:
		extcnt = atomic_read(&mcg_hybs->hybridswap_peakextcnt);
		if (extcnt < 0)
			extcnt = 0;
		val = ((unsigned long long) extcnt) << EXTENT_SHIFT;
		break;
	default:
		break;
	}

	return val;
}
/*
 * Append one failure record (fail point, zram index, extent id,
 * timestamp, task name) to the bounded in-memory record buffer.
 * Silently drops the record when the buffer is full
 * (MAX_FAIL_RECORD_NUM) or the task name would not fit; safe from
 * any context (irqsave spinlock).
 */
void hybridswap_fail_record(enum hybridswap_fail_point point,
	u32 index, int ext_id, unsigned char *task_comm)
{
	struct hybridswap_stat *stat = NULL;
	unsigned long flags;
	unsigned int copylen = strlen(task_comm) + 1;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_ERR, "can't get stat obj!\n");
		return;
	}

	if (copylen > TASK_COMM_LEN) {
		hybp(HYB_ERR, "task_comm len %d is err\n", copylen);
		return;
	}

	spin_lock_irqsave(&stat->record.lock, flags);
	if (stat->record.num < MAX_FAIL_RECORD_NUM) {
		stat->record.record[stat->record.num].point = point;
		stat->record.record[stat->record.num].index = index;
		stat->record.record[stat->record.num].ext_id = ext_id;
		stat->record.record[stat->record.num].time = ktime_get();
		memcpy(stat->record.record[stat->record.num].task_comm,
			task_comm, copylen);
		stat->record.num++;
	}
	spin_unlock_irqrestore(&stat->record.lock, flags);
}
/*
 * Atomically snapshot the failure-record buffer into @record_info and
 * reset it (read-and-clear semantics). No-op when hybridswap is
 * disabled or stats are unavailable — @record_info is then left as
 * the caller initialized it.
 */
static void hybridswap_fail_record_get(
	struct hybridswap_fail_record_info *record_info)
{
	struct hybridswap_stat *stat = NULL;
	unsigned long flags;

	if (!hybridswap_core_enabled())
		return;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_ERR, "can't get stat obj!\n");
		return;
	}

	spin_lock_irqsave(&stat->record.lock, flags);
	memcpy(record_info, &stat->record,
		sizeof(struct hybridswap_fail_record_info));
	stat->record.num = 0;
	spin_unlock_irqrestore(&stat->record.lock, flags);
}
/*
 * Format the (drained) failure records into @buf, one line per record
 * with fail point, age in us, task name, zram index and extent id.
 * Returns the number of bytes written; @buf is assumed to be a sysfs
 * PAGE_SIZE buffer.
 */
static ssize_t hybridswap_fail_record_show(char *buf)
{
	int i;
	ssize_t size = 0;
	struct hybridswap_fail_record_info record_info = { 0 };

	hybridswap_fail_record_get(&record_info);
	/* was PAGE_SIZE: pass the remaining space like every other call */
	size += scnprintf(buf + size, PAGE_SIZE - size,
		"hybridswap_fail_record_num: %d\n", record_info.num);
	for (i = 0; i < record_info.num; ++i)
		size += scnprintf(buf + size, PAGE_SIZE - size,
			"point[%u]time[%lld]taskname[%s]index[%u]ext_id[%d]\n",
			record_info.record[i].point,
			ktime_us_delta(ktime_get(),
				record_info.record[i].time),
			record_info.record[i].task_comm,
			record_info.record[i].index,
			record_info.record[i].ext_id);
	return size;
}
/*
 * sysfs show handler exposing (and draining) the failure records.
 */
ssize_t hybridswap_report_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return hybridswap_fail_record_show(buf);
}
/*
 * Format the eswap/zram meminfo summary (totals, compressed and
 * original sizes, in KB) into @buf, writing at most @len bytes.
 * Returns the number of bytes written, 0 on bad arguments.
 *
 * Fixes: the original declared "static inline meminfo_show(...)" with
 * no return type (implicit int — invalid since C99 and truncates the
 * ssize_t result). Also cast unsigned long values passed to %llu so
 * the varargs match the format specifiers.
 */
static inline ssize_t meminfo_show(struct hybridswap_stat *stat, char *buf, ssize_t len)
{
	unsigned long eswap_total_pages = 0, eswap_compressed_pages = 0;
	unsigned long eswap_used_pages = 0;
	unsigned long zram_total_pages, zram_used_pages, zram_compressed;
	ssize_t size = 0;

	if (!stat || !buf || !len)
		return 0;

	(void)hybridswap_stored_info(&eswap_total_pages, &eswap_compressed_pages);
	eswap_used_pages = atomic64_read(&stat->stored_pages);
#ifdef CONFIG_HYBRIDSWAP_SWAPD
	zram_total_pages = get_nr_zram_total();
#else
	zram_total_pages = 0;
#endif
	zram_compressed = atomic64_read(&stat->zram_stored_size);
	zram_used_pages = atomic64_read(&stat->zram_stored_pages);

	size += scnprintf(buf + size, len - size, "%-32s %12llu KB\n",
		"EST:", (unsigned long long)eswap_total_pages << (PAGE_SHIFT - 10));
	size += scnprintf(buf + size, len - size, "%-32s %12llu KB\n",
		"ESU_C:", (unsigned long long)eswap_compressed_pages << (PAGE_SHIFT - 10));
	size += scnprintf(buf + size, len - size, "%-32s %12llu KB\n",
		"ESU_O:", (unsigned long long)eswap_used_pages << (PAGE_SHIFT - 10));
	size += scnprintf(buf + size, len - size, "%-32s %12llu KB\n",
		"ZST:", (unsigned long long)zram_total_pages << (PAGE_SHIFT - 10));
	size += scnprintf(buf + size, len - size, "%-32s %12llu KB\n",
		"ZSU_C:", (unsigned long long)zram_compressed >> 10);
	size += scnprintf(buf + size, len - size, "%-32s %12llu KB\n",
		"ZSU_O:", (unsigned long long)zram_used_pages << (PAGE_SHIFT - 10));
	return size;
}
/*
 * sysfs show handler dumping a full snapshot of every hybridswap
 * counter (reclaim-in/batch-out traffic, fault counts, stored sizes,
 * derived deltas) followed by the meminfo summary. Byte counters are
 * reported in KB. Returns the number of bytes written, 0 when the
 * core is disabled or stats are unavailable.
 */
ssize_t hybridswap_stat_snap_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t size = 0;
	struct hybridswap_stat *stat = NULL;

	if (!hybridswap_core_enabled())
		return 0;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_INFO, "can't get stat obj!\n");
		return 0;
	}

	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"reclaimin_cnt:", atomic64_read(&stat->reclaimin_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"reclaimin_bytes:", atomic64_read(&stat->reclaimin_bytes) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"reclaimin_real_load:", atomic64_read(&stat->reclaimin_real_load) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"reclaimin_bytes_daily:", atomic64_read(&stat->reclaimin_bytes_daily) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"reclaimin_pages:", atomic64_read(&stat->reclaimin_pages) * PAGE_SIZE / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"reclaimin_infight:", atomic64_read(&stat->reclaimin_infight));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"batchout_cnt:", atomic64_read(&stat->batchout_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"batchout_bytes:", atomic64_read(&stat->batchout_bytes) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"batchout_real_load:", atomic64_read(&stat->batchout_real_load) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"batchout_pages:", atomic64_read(&stat->batchout_pages) * PAGE_SIZE / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"batchout_inflight:", atomic64_read(&stat->batchout_inflight));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"fault_cnt:", atomic64_read(&stat->fault_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"hybridswap_fault_cnt:", atomic64_read(&stat->hybridswap_fault_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"reout_pages:", atomic64_read(&stat->reout_pages) * PAGE_SIZE / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"reout_bytes:", atomic64_read(&stat->reout_bytes) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"zram_stored_pages:", atomic64_read(&stat->zram_stored_pages) * PAGE_SIZE / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"zram_stored_size:", atomic64_read(&stat->zram_stored_size) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"stored_pages:", atomic64_read(&stat->stored_pages) * PAGE_SIZE / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"stored_size:", atomic64_read(&stat->stored_size) / SZ_1K);
	/* derived deltas: net write-back vs. what is still stored */
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu KB\n",
		"reclain-batchout:", (atomic64_read(&stat->reclaimin_real_load) -
		atomic64_read(&stat->batchout_real_load)) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12lld KB\n",
		"reclain-batchout-stored:",
		(atomic64_read(&stat->reclaimin_real_load) -
		atomic64_read(&stat->batchout_real_load) -
		atomic64_read(&stat->stored_size)) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12lld KB\n",
		"dropped_ext_size:", atomic64_read(&stat->dropped_ext_size) / SZ_1K);
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"notify_free:", atomic64_read(&stat->notify_free));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"frag_cnt:", atomic64_read(&stat->frag_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"mcg_cnt:", atomic64_read(&stat->mcg_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"ext_cnt:", atomic64_read(&stat->ext_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"miss_free:", atomic64_read(&stat->miss_free));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"mcgid_clear:", atomic64_read(&stat->mcgid_clear));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"skip_track_cnt:", atomic64_read(&stat->skip_track_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"null_memcg_skip_track_cnt:",
		atomic64_read(&stat->null_memcg_skip_track_cnt));
	size += scnprintf(buf + size, PAGE_SIZE - size, "%-32s %12llu\n",
		"used_swap_pages:", atomic64_read(&stat->used_swap_pages) * PAGE_SIZE / SZ_1K);
	size += meminfo_show(stat, buf + size, PAGE_SIZE - size);
	return size;
}
/*
 * sysfs show handler exposing just the meminfo summary (see
 * meminfo_show()). Returns 0 when the core is disabled or stats are
 * unavailable.
 */
ssize_t hybridswap_meminfo_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct hybridswap_stat *stat = NULL;

	if (!hybridswap_core_enabled())
		return 0;

	stat = hybridswap_get_stat_obj();
	if (unlikely(!stat)) {
		hybp(HYB_INFO, "can't get stat obj!\n");
		return 0;
	}

	return meminfo_show(stat, buf, PAGE_SIZE);
}

File diff suppressed because it is too large Load Diff

View File

@@ -37,9 +37,6 @@
#include "zram_drv.h"
#include "zram_drv_internal.h"
#ifdef CONFIG_HYBRIDSWAP
#include "hybridswap/hybridswap.h"
#endif
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
@@ -1123,26 +1120,7 @@ static void zram_free_page(struct zram *zram, size_t index)
atomic64_dec(&zram->stats.huge_pages);
}
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
if (zram_test_flag(zram, index, ZRAM_CACHED)) {
struct page *page = (struct page *)zram_get_page(zram, index);
del_page_from_cache(page);
page->mem_cgroup = NULL;
put_free_page(page);
zram_clear_flag(zram, index, ZRAM_CACHED);
goto out;
}
if (zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
zram_clear_flag(zram, index, ZRAM_CACHED_COMPRESS);
goto out;
}
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
hybridswap_untrack(zram, index);
#endif
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
@@ -1179,124 +1157,6 @@ out:
~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
/*
 * Repoint zram slot @index at an uncompressed cached @page (async
 * compress path): under the slot lock, release the previous anon
 * page, free whatever the slot held, then mark the slot ZRAM_CACHED
 * with the page and a PAGE_SIZE object size.
 *
 * NOTE(review): @page is a struct page pointer smuggled through an
 * unsigned long — assumes zram_set_page() stores it verbatim; confirm
 * against zram_drv_internal.h.
 */
void update_zram_index(struct zram *zram, u32 index, unsigned long page)
{
	zram_slot_lock(zram, index);
	put_anon_pages((struct page*)page);
	zram_free_page(zram, index);
	zram_set_flag(zram, index, ZRAM_CACHED);
	zram_set_page(zram, index, page);
	zram_set_obj_size(zram, index, PAGE_SIZE);
	zram_slot_unlock(zram, index);
}
int async_compress_page(struct zram *zram, struct page* page)
{
int ret = 0;
unsigned long alloced_pages;
unsigned long handle = 0;
unsigned int comp_len = 0;
void *src, *dst;
struct zcomp_strm *zstrm;
int index = get_zram_index(page);
compress_again:
zram_slot_lock(zram, index);
if (!zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
zram_slot_unlock(zram, index);
return 0;
}
zram_slot_unlock(zram, index);
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
kunmap_atomic(src);
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
if (!handle)
handle = zs_malloc(zram->mem_pool, comp_len,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE |
__GFP_CMA);
if (!handle) {
zcomp_stream_put(zram->comp);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE | __GFP_CMA |
GFP_ATOMIC | ___GFP_HIGH_ATOMIC_ZRAM);
if (handle)
goto compress_again;
return -ENOMEM;
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
update_used_max(zram, alloced_pages);
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zcomp_stream_put(zram->comp);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
src = zstrm->buffer;
if (comp_len == PAGE_SIZE)
src = kmap_atomic(page);
memcpy(dst, src, comp_len);
if (comp_len == PAGE_SIZE)
kunmap_atomic(src);
zcomp_stream_put(zram->comp);
zs_unmap_object(zram->mem_pool, handle);
atomic64_add(comp_len, &zram->stats.compr_data_size);
/*
* Free memory associated with this sector
* before overwriting unused sectors.
*/
zram_slot_lock(zram, index);
if (!zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
atomic64_sub(comp_len, &zram->stats.compr_data_size);
zs_free(zram->mem_pool, handle);
zram_slot_unlock(zram, index);
return 0;
}
zram_free_page(zram, index);
if (comp_len == PAGE_SIZE) {
zram_set_flag(zram, index, ZRAM_HUGE);
atomic64_inc(&zram->stats.huge_pages);
}
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
#ifdef CONFIG_HYBRIDSWAP_CORE
hybridswap_track(zram, index, page->mem_cgroup);
#endif
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
return ret;
}
#endif
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
struct bio *bio, bool partial_io)
@@ -1307,22 +1167,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
void *src, *dst;
zram_slot_lock(zram, index);
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
if (akcompress_cache_fault_out(zram, page, index))
return 0;
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
if (likely(!bio)) {
ret = hybridswap_fault_out(zram, index);
if (unlikely(ret)) {
pr_err("search in hybridswap failed! err=%d, page=%u\n",
ret, index);
zram_slot_unlock(zram, index);
return ret;
}
}
#endif
if (zram_test_flag(zram, index, ZRAM_WB)) {
struct bio_vec bvec;
@@ -1431,12 +1276,6 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
kunmap_atomic(mem);
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
if ((current_is_kswapd() || current_is_swapd(current)) &&
add_anon_page2cache(zram, index, page)) {
return 0;
}
#endif
compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
@@ -1524,9 +1363,6 @@ out:
zram_set_obj_size(zram, index, comp_len);
}
#ifdef CONFIG_HYBRIDSWAP_CORE
hybridswap_track(zram, index, page->mem_cgroup);
#endif
zram_slot_unlock(zram, index);
/* Update stats */
@@ -1734,13 +1570,6 @@ static void zram_slot_free_notify(struct block_device *bdev,
return;
}
#ifdef CONFIG_HYBRIDSWAP_CORE
if (!hybridswap_delete(zram, index)) {
zram_slot_unlock(zram, index);
atomic64_inc(&zram->stats.miss_free);
return;
}
#endif
zram_free_page(zram, index);
zram_slot_unlock(zram, index);
}
@@ -1957,27 +1786,6 @@ static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
#ifdef CONFIG_HYBRIDSWAP
static DEVICE_ATTR_RO(hybridswap_vmstat);
static DEVICE_ATTR_RW(hybridswap_loglevel);
static DEVICE_ATTR_RW(hybridswap_enable);
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
static DEVICE_ATTR_RW(hybridswap_swapd_pause);
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
static DEVICE_ATTR_RW(hybridswap_core_enable);
static DEVICE_ATTR_RW(hybridswap_loop_device);
static DEVICE_ATTR_RW(hybridswap_dev_life);
static DEVICE_ATTR_RW(hybridswap_quota_day);
static DEVICE_ATTR_RO(hybridswap_report);
static DEVICE_ATTR_RO(hybridswap_stat_snap);
static DEVICE_ATTR_RO(hybridswap_meminfo);
static DEVICE_ATTR_RW(hybridswap_zram_increase);
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
static DEVICE_ATTR_RW(hybridswap_akcompress);
#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -2001,27 +1809,6 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_bd_stat.attr,
#endif
&dev_attr_debug_stat.attr,
#ifdef CONFIG_HYBRIDSWAP
&dev_attr_hybridswap_vmstat.attr,
&dev_attr_hybridswap_loglevel.attr,
&dev_attr_hybridswap_enable.attr,
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
&dev_attr_hybridswap_swapd_pause.attr,
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
&dev_attr_hybridswap_core_enable.attr,
&dev_attr_hybridswap_report.attr,
&dev_attr_hybridswap_meminfo.attr,
&dev_attr_hybridswap_stat_snap.attr,
&dev_attr_hybridswap_loop_device.attr,
&dev_attr_hybridswap_dev_life.attr,
&dev_attr_hybridswap_quota_day.attr,
&dev_attr_hybridswap_zram_increase.attr,
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
&dev_attr_hybridswap_akcompress.attr,
#endif
NULL,
};
@@ -2169,9 +1956,6 @@ static int zram_remove(struct zram *zram)
del_gendisk(zram->disk);
blk_cleanup_queue(zram->disk->queue);
put_disk(zram->disk);
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
destroy_akcompressd_task(zram);
#endif
if (zram_devices == zram)
zram_devices = NULL;
kfree(zram);
@@ -2374,11 +2158,6 @@ static int __init zram_init(void)
proc_create("zraminfo", 0644, NULL, &zraminfo_proc_fops);
#endif
#ifdef CONFIG_HYBRIDSWAP
ret = hybridswap_pre_init();
if (ret)
goto out_error;
#endif
return 0;

View File

@@ -51,16 +51,6 @@ enum zram_pageflags {
ZRAM_UNDER_WB, /* page is under writeback */
ZRAM_HUGE, /* Incompressible page */
ZRAM_IDLE, /* not accessed page since last idle marking */
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
ZRAM_CACHED, /* page is cached in async compress cache buffer */
ZRAM_CACHED_COMPRESS, /* page is under async compress */
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
ZRAM_BATCHING_OUT,
ZRAM_FROM_HYBRIDSWAP,
ZRAM_MCGID_CLEAR,
ZRAM_IN_BD, /* zram stored in back device */
#endif
__NR_ZRAM_PAGEFLAGS,
};
@@ -71,9 +61,6 @@ struct zram_table_entry {
union {
unsigned long handle;
unsigned long element;
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
unsigned long page;
#endif
};
unsigned long flags;
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -138,15 +125,12 @@ struct zram {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
struct dentry *debugfs_dir;
#endif
#if (defined CONFIG_ZRAM_WRITEBACK) || (defined CONFIG_HYBRIDSWAP_CORE)
#if (defined CONFIG_ZRAM_WRITEBACK)
struct block_device *bdev;
unsigned int old_block_size;
unsigned long nr_pages;
unsigned long increase_nr_pages;
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
struct hybridswap_area *area;
#endif
};
/* mlog */

View File

@@ -35,8 +35,4 @@
zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; \
} while(0)
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
extern int async_compress_page(struct zram *zram, struct page* page);
extern void update_zram_index(struct zram *zram, u32 index, unsigned long page);
#endif
#endif /* _ZRAM_DRV_INTERNAL_H_ */

View File

@@ -32,9 +32,6 @@
#include <linux/tick.h>
#include <trace/events/power.h>
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
#include <linux/task_sched_info.h>
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
static LIST_HEAD(cpufreq_policy_list);
@@ -349,9 +346,6 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
policy->cur = freqs->new;
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
update_freq_info(policy);
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
}
break;
}
@@ -2264,9 +2258,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
update_freq_limit_info(policy);
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
arch_set_max_freq_scale(policy->cpus, policy->max);
arch_set_min_freq_scale(policy->cpus, policy->min);

View File

@@ -25,9 +25,6 @@
#include <linux/spinlock.h>
#include <linux/threads.h>
#ifdef CONFIG_OPLUS_FEATURE_MIDAS
#include <linux/oplus_midas.h>
#endif
#define UID_HASH_BITS 10
@@ -428,9 +425,6 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
uid_entry->time_in_state[state] += cputime;
spin_unlock_irqrestore(&uid_lock, flags);
#ifdef CONFIG_OPLUS_FEATURE_MIDAS
midas_record_task_times(uid, cputime, p, state);
#endif
rcu_read_lock();
uid_entry = find_uid_entry_rcu(uid);

View File

@@ -19,9 +19,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#ifdef OPLUS_BUG_STABILITY
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif
#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
@@ -3777,9 +3774,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
DDPAEE("%s:%d, failed to request irq:%d ret:%d comp_id:%d\n",
__func__, __LINE__,
irq, ret, comp_id);
#ifdef OPLUS_BUG_STABILITY
mm_fb_display_kevent("DisplayDriverID@@501$$", MM_FB_KEY_RATELIMIT_1H, "ovl_probe error irq:%d ret:%d comp_id:%d", irq, ret, comp_id);
#endif
return ret;
}

View File

@@ -33,9 +33,6 @@
#include "mtk_layering_rule.h"
#include "mtk_drm_trace.h"
#include "swpm_me.h"
#ifdef OPLUS_BUG_STABILITY
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif
#define DISP_REG_RDMA_INT_ENABLE 0x0000
#define DISP_REG_RDMA_INT_STATUS 0x0004
@@ -404,11 +401,6 @@ static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
DDPAEE("%s: underflow! cnt=%d\n",
mtk_dump_comp_str(rdma),
priv->underflow_cnt);
#ifdef OPLUS_BUG_STABILITY
if ((priv->underflow_cnt) < 5) {
mm_fb_display_kevent("DisplayDriverID@@502$$", MM_FB_KEY_RATELIMIT_1H, "underflow cnt=%d", priv->underflow_cnt);
}
#endif
}
}

View File

@@ -28,9 +28,6 @@
#include <linux/kthread.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#ifdef OPLUS_BUG_STABILITY
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif
#include "mtk_drm_arr.h"
#include "mtk_drm_drv.h"
@@ -6148,9 +6145,6 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
DDPAEE("%s:%d, invalid vblank:%d, crtc:%p\n",
__func__, __LINE__,
drm_crtc_vblank_get(crtc), crtc);
#ifdef OPLUS_BUG_STABILITY
mm_fb_display_kevent("DisplayDriverID@@503$$", MM_FB_KEY_RATELIMIT_1H, "invalid vblank:%d", drm_crtc_vblank_get(crtc));
#endif
}
mtk_crtc->event = state->base.event;
state->base.event = NULL;

View File

@@ -20,9 +20,6 @@
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#ifdef OPLUS_BUG_STABILITY
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif
#include "mtk_drm_ddp.h"
#include "mtk_drm_crtc.h"
@@ -8317,9 +8314,6 @@ static int mtk_ddp_probe(struct platform_device *pdev)
DDPAEE("%s:%d, failed to request irq:%d ret:%d\n",
__func__, __LINE__,
irq, ret);
#ifdef OPLUS_BUG_STABILITY
mm_fb_display_kevent("DisplayDriverID@@504$$", MM_FB_KEY_RATELIMIT_1H, "mtk_ddp_probe failed to request irq:%d ret:%d", irq, ret);
#endif
return ret;
}

View File

@@ -21,9 +21,6 @@
#include "mtk_drm_drv.h"
#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
#ifdef OPLUS_BUG_STABILITY
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif
/*
* mtk specific framebuffer structure.
@@ -197,9 +194,6 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
DDPAEE("%s:%d, invalid ret:%ld\n",
__func__, __LINE__,
ret);
#ifdef OPLUS_BUG_STABILITY
mm_fb_display_kevent("DisplayDriverID@@505$$", MM_FB_KEY_RATELIMIT_1H, "mtk_fb_wait invalid ret:%ld", ret);
#endif
return ret;
}

View File

@@ -53,9 +53,6 @@
#include "mtk_drm_fbdev.h"
#include "mtk_fbconfig_kdebug.h"
/* ********* end Panel Master *********** */
#ifdef OPLUS_BUG_STABILITY
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif
#include <asm/arch_timer.h>
/* ********** bridge ic ***************** */
@@ -1895,9 +1892,6 @@ static irqreturn_t mtk_dsi_irq_status(int irq, void *dev_id)
dsi->encoder.crtc);
}
dsi_underrun_trigger = 0;
#ifdef OPLUS_BUG_STABILITY
mm_fb_display_kevent("DisplayDriverID@@506$$", MM_FB_KEY_RATELIMIT_1H, "underrun");
#endif
}
}
@@ -4148,19 +4142,6 @@ int mtk_dsi_esd_cmp(struct mtk_ddp_comp *comp, void *handle, void *slot)
DDPPR_ERR("[DSI]cmp fail:read(0x%x)!=expect(0x%x)\n",
chk_val, lcm_esd_tb->para_list[0]);
ret = -1;
#ifdef OPLUS_BUG_STABILITY
if (ret < 0) {
char payload[200] = "";
int cnt = 0;
cnt += scnprintf(payload + cnt, sizeof(payload) - cnt, "DisplayDriverID@@507$$");
cnt += scnprintf(payload + cnt, sizeof(payload) - cnt, "ESD:");
cnt += scnprintf(payload + cnt, sizeof(payload) - cnt, "%02x = %02x",
lcm_esd_tb->cmd,lcm_esd_tb->para_list[0]);
DDPPR_ERR("ESD check failed: %s\n", payload);
mm_fb_display_kevent(payload, MM_FB_KEY_RATELIMIT_1H, "ESD check failed");
}
#endif
break;
}
#ifdef OPLUS_BUG_STABILITY

View File

@@ -30,7 +30,6 @@
#include <linux/platform_device.h>
#include <linux/of_graph.h>
#include <soc/oplus/device_info.h>
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#include <mt-plat/mtk_boot_common.h>
#define CONFIG_MTK_PANEL_EXT
@@ -1302,11 +1301,6 @@ static int lcm_panel_poweroff(struct drm_panel *panel)
msleep(70);
/*if (fan53870_fail_flag == 1) {
mm_fb_display_kevent("DisplayDriverID@@509$$", MM_FB_KEY_RATELIMIT_1H, "fan53870 no ok");
fan53870_fail_flag = 0;
}*/
return 0;
}

View File

@@ -38,11 +38,6 @@
#endif /*CONFIG_MACH_MT6768*/
//#endif /*OPLUS_FEATURE_TP_BASIC*/
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_THEIA)
#include <soc/oplus/system/oplus_bscheck.h>
#include <soc/oplus/system/oplus_brightscreen_check.h>
#endif
#define KPD_NAME "mtk-kpd"
#ifdef CONFIG_LONG_PRESS_MODE_EN
@@ -515,14 +510,6 @@ void kpd_pwrkey_pmic_handler(unsigned long pressed)
return;
}
kpd_pmic_pwrkey_hal(pressed);
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_THEIA)
if(pressed){
//we should canel per work
black_screen_timer_restart();
bright_screen_timer_restart();
}
#endif
}
#endif

View File

@@ -191,14 +191,6 @@ void kpd_pmic_pwrkey_hal(unsigned long pressed)
input_sync(kpd_input_dev);
kpd_print(KPD_SAY "(%s) HW keycode =%d using PMIC\n",
pressed ? "pressed" : "released", kpd_dts_data.kpd_sw_pwrkey);
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_THEIA)
if(pressed){
//we should canel per work
black_screen_timer_restart();
bright_screen_timer_restart();
}
#endif
}
static int mrdump_eint_state;

View File

@@ -181,9 +181,6 @@ enum rtc_spare_enum {
RTC_SENSOR_CAUSE_PANIC,
#endif /* OPLUS_BUG_STABILITY */
RTC_SAFE_BOOT,
#ifdef OPLUS_FEATURE_AGINGTEST
RTC_AGINGTEST_BOOT,
#endif /*OPLUS_FEATURE_AGINGTEST */
#ifdef OPLUS_BUG_STABILITY
RTC_EDL_BOOT,
#endif /* OPLUS_BUG_STABILITY */
@@ -231,9 +228,6 @@ u16 rtc_spare_reg[RTC_SPAR_NUM][3] = {
#endif /* OPLUS_BUG_STABILITY */
#ifdef OPLUS_BUG_STABILITY
{RTC_SPAR0, 0x1, 15},
#ifdef OPLUS_FEATURE_AGINGTEST
{RTC_SPAR0, 0x01, 14},
#endif /*OPLUS_FEATURE_AGINGTEST */
{RTC_AL_DOW, 0x1, 15},
#endif /* OPLUS_BUG_STABILITY */
};
@@ -785,17 +779,6 @@ void oppo_rtc_mark_sau(void)
spin_unlock_irqrestore(&rtc_misc->lock, flags);
}
#ifdef OPLUS_FEATURE_AGINGTEST
void oppo_rtc_mark_agingtest(void)
{
unsigned long flags;
pr_notice("rtc_mark_agingtest\n");
spin_lock_irqsave(&rtc_misc->lock, flags);
mtk_rtc_set_spare_register(RTC_AGINGTEST_BOOT, 0x01);
spin_unlock_irqrestore(&rtc_misc->lock, flags);
}
#endif /*OPLUS_FEATURE_AGINGTEST */
void oppo_rtc_mark_factory(void)
{

View File

@@ -575,5 +575,4 @@ source "drivers/misc/tri_state_key/Kconfig"
#ifdef OPLUS_FEATURE_CHG_BASIC//Fanhong.Kong@ProDrv.CHG,add 2018/10/25 for vib aw8697
source "drivers/misc/aw8697_haptic/Kconfig"
#endif
source "drivers/misc/oplus_misc_healthinfo/Kconfig"
endmenu

View File

@@ -68,8 +68,6 @@ obj-$(CONFIG_AW8697_HAPTIC) += aw8697_haptic/
obj-$(CONFIG_MTK_FB) += oplus/
#endif
obj-$(CONFIG_OPLUS_MISC_HEALTHONFO) += oplus_misc_healthinfo/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o

View File

@@ -19,9 +19,6 @@
#include "adsp_platform_driver.h"
#include "adsp_excep.h"
#include "adsp_logger.h"
#ifdef CONFIG_OPLUS_FEATURE_MM_FEEDBACK
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif /* CONFIG_OPLUS_FEATURE_MM_FEEDBACK */
#define ADSP_MISC_EXTRA_SIZE 0x400 //1KB
#define ADSP_MISC_BUF_SIZE 0x10000 //64KB
@@ -216,10 +213,6 @@ static void adsp_exception_dump(struct adsp_exception_control *ctrl)
coredump->assert_log);
}
pr_info("%s", detail);
#ifdef CONFIG_OPLUS_FEATURE_MM_FEEDBACK
mm_fb_audio_kevent_named(OPLUS_AUDIO_EVENTID_ADSP_CRASH, \
MM_FB_KEY_RATELIMIT_5MIN, "FieldData@@%s$$detailData@@audio$$module@@adsp", coredump->assert_log);
#endif //CONFIG_OPLUS_FEATURE_MM_FEEDBACK
/* adsp aed api, only detail information available*/
aed_common_exception_api("adsp", (const int *)coredump, coredump_size,

View File

@@ -19,9 +19,6 @@
#include "adsp_platform_driver.h"
#include "adsp_excep.h"
#include "adsp_logger.h"
#ifdef CONFIG_OPLUS_FEATURE_MM_FEEDBACK
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif /* CONFIG_OPLUS_FEATURE_MM_FEEDBACK */
#define ADSP_MISC_EXTRA_SIZE 0x400 //1KB
#define ADSP_MISC_BUF_SIZE 0x10000 //64KB
@@ -220,10 +217,6 @@ static void adsp_exception_dump(struct adsp_exception_control *ctrl)
coredump->assert_log);
}
pr_info("%s", detail);
#ifdef CONFIG_OPLUS_FEATURE_MM_FEEDBACK
mm_fb_audio_kevent_named(OPLUS_AUDIO_EVENTID_ADSP_CRASH, \
MM_FB_KEY_RATELIMIT_5MIN, "FieldData@@%s$$detailData@@audio$$module@@adsp", coredump->assert_log);
#endif //CONFIG_OPLUS_FEATURE_MM_FEEDBACK
/* adsp aed api, only detail information available*/
aed_common_exception_api("adsp", (const int *)coredump, coredump_size,
NULL, 0, detail, db_opt);
@@ -279,10 +272,6 @@ void adsp_aed_worker(struct work_struct *ws)
"[ADSP]",
"ASSERT: ADSP DEAD! Recovery Fail");
#ifdef CONFIG_OPLUS_FEATURE_MM_FEEDBACK
mm_fb_audio_kevent_named(OPLUS_AUDIO_EVENTID_ADSP_RECOVERY_FAIL, \
MM_FB_KEY_RATELIMIT_5MIN, "payload@@ADSP DEAD! Recovery Fail,ret=%d", ret);
#endif //CONFIG_OPLUS_FEATURE_MM_FEEDBACK
/* BUG_ON(1); */
}

View File

@@ -38,11 +38,6 @@
#include <linux/arm-smccc.h>
#include <uapi/linux/psci.h>
#ifdef OPLUS_FEATURE_PERFORMANCE
//ZuoTong@ANDROID.PERFORMANCE, 2020/06/28,Add for flushing device cache before goto dump mode!
extern bool is_triggering_panic;
extern void flush_cache_on_panic(void);
#endif /*OPLUS_FEATURE_PERFORMANCE*/
static char mrdump_lk[12];
bool mrdump_ddr_reserve_ready;
@@ -151,47 +146,12 @@ __weak void aee_wdt_zap_locks(void)
pr_notice("%s:weak function\n", __func__);
}
#ifdef OPLUS_FEATURE_PHOENIX
extern void deal_fatal_err(void);
extern int kernel_panic_happened;
extern int hwt_happened;
#endif /* OPLUS_FEATURE_PHOENIX */
#ifdef CONFIG_OPLUS_FEATURE_PANIC_FLUSH
extern int panic_flush_device_cache(int timeout);
#endif
int mrdump_common_die(int fiq_step, int reboot_reason, const char *msg,
struct pt_regs *regs)
{
#ifdef OPLUS_FEATURE_PHOENIX
if((AEE_REBOOT_MODE_KERNEL_OOPS == reboot_reason || AEE_REBOOT_MODE_KERNEL_PANIC == reboot_reason)
&& !kernel_panic_happened)
{
kernel_panic_happened = 1;
deal_fatal_err();
}
else if (AEE_REBOOT_MODE_WDT == reboot_reason && !hwt_happened)
{
hwt_happened = 1;
deal_fatal_err();
}
#endif /* OPLUS_FEATURE_PHOENIX */
#ifdef CONFIG_OPLUS_FEATURE_PANIC_FLUSH
panic_flush_device_cache(2000);
#endif
#ifdef OPLUS_FEATURE_PERFORMANCE
//ZuoTong@ANDROID.PERFORMANCE, 2020/06/28,Add for flushing device cache before go to dump mode!
if(!is_triggering_panic)
{
is_triggering_panic = true;
pr_notice("is_triggering_panic : true\n");
flush_cache_on_panic();
}
#endif // OPLUS_FEATURE_PERFORMANCE
bust_spinlocks(1);
aee_disable_api();

View File

@@ -81,9 +81,6 @@
#include "swpm_v1/mtk_swpm_interface.h"
#endif
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
#include <linux/task_sched_info.h>
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
#ifdef CONFIG_MTK_CPU_MSSV
extern unsigned int cpumssv_get_state(void);
@@ -423,9 +420,6 @@ int Ripi_cpu_dvfs_thread(void *data)
freqs.new, 0);
}
#endif
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
update_freq_limit_info(p->mt_policy);
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
trace_cpu_frequency_limits(p->mt_policy->max,
p->mt_policy->min,
p->mt_policy->cpu);

View File

@@ -1383,14 +1383,7 @@ static struct freq_attr *_mt_cpufreq_attr[] = {
};
static struct cpufreq_driver _mt_cpufreq_driver = {
#if defined(OPLUS_FEATURE_SCHEDUTIL_USE_TL) && defined(CONFIG_SCHEDUTIL_USE_TL) \
|| (defined(CONFIG_MTK_PLAT_MT6885_EMULATION) || defined(CONFIG_MACH_MT6893) \
|| defined(CONFIG_MACH_MT6833))
.flags = CPUFREQ_ASYNC_NOTIFICATION | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
#else
.flags = CPUFREQ_ASYNC_NOTIFICATION,
#endif
.verify = _mt_cpufreq_verify,
.target = _mt_cpufreq_target,
.init = _mt_cpufreq_init,

View File

@@ -1193,11 +1193,7 @@ static struct freq_attr *_mt_cpufreq_attr[] = {
};
static struct cpufreq_driver _mt_cpufreq_driver = {
#if defined(OPLUS_FEATURE_SCHEDUTIL_USE_TL) && defined(CONFIG_SCHEDUTIL_USE_TL)
.flags = CPUFREQ_ASYNC_NOTIFICATION | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
#else
.flags = CPUFREQ_ASYNC_NOTIFICATION,
#endif
.verify = _mt_cpufreq_ver_dbgify,
.target = _mt_cpufreq_target,
.init = _mt_cpufreq_init,

View File

@@ -16,10 +16,6 @@
#include "cmdq-sec-tl-api.h"
#include "cmdq-util.h"
#ifdef OPLUS_BUG_STABILITY
#include <soc/oplus/system/oplus_mm_kevent_fb.h>
#endif
#ifdef CMDQ_SECURE_MTEE_SUPPORT
#include "cmdq_sec_mtee.h"
#endif
@@ -1804,9 +1800,6 @@ static s32 cmdq_sec_late_init_wsm(void *data)
err = cmdq_sec_session_init(cmdq->context);
mutex_unlock(&cmdq->exec_lock);
if (err) {
#ifdef OPLUS_BUG_STABILITY
mm_fb_display_kevent("DisplayDriverID@@509$$", MM_FB_KEY_RATELIMIT_1H, "cmdq sec session init failed:%d", err);
#endif
err = -CMDQ_ERR_SEC_CTX_SETUP;
cmdq_err("session init failed:%d", err);
continue;

View File

@@ -913,19 +913,6 @@ SYSDVT_OBJS += $(SYSDVT_DIR)dvt_dmashdl.o
endif
endif
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
#//XiaZijian@CONNECTIVITY.WIFI.BASIC.26106 2021/03/18 , add for oplus wfd
ifeq ($(CONFIG_OPLUS_FEATURE_WIFI_OPLUSWFD), m)
ccflags-y += -DOPLUS_FEATURE_WIFI_OPLUSWFD=1
else ifeq ($(CONFIG_OPLUS_FEATURE_WIFI_OPLUSWFD), y)
ccflags-y += -DOPLUS_FEATURE_WIFI_OPLUSWFD=1
else ifeq ($(CONFIG_OPLUS_FEATURE_WIFI_OPLUSWFD), n)
ccflags-y += -UOPLUS_FEATURE_WIFI_OPLUSWFD
endif
OPLUS_WFD_OBJS += oplus_wfd/wlan_oplus_wfd.o
#endif /* OPLUS_FEATURE_WIFI_OPLUSWFD */
# ---------------------------------------------------
# Service git List
# ---------------------------------------------------
@@ -964,11 +951,6 @@ $(MODULE_NAME)-objs += $(SYSDVT_OBJS)
$(MODULE_NAME)-objs += $(NAN_OBJS)
$(MODULE_NAME)-objs += $(NAN_SEC_OBJS)
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
#//XiaZijian@CONNECTIVITY.WIFI.BASIC.26106 2021/03/18 , add for oplus wfd
$(MODULE_NAME)-objs += $(OPLUS_WFD_OBJS)
#endif /* OPLUS_FEATURE_WIFI_OPLUSWFD */
ifneq ($(findstring UT_TEST_MODE,$(MTK_COMBO_CHIP)),)
include $(src)/test/ut.make
endif

View File

@@ -1,336 +0,0 @@
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wireless.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <net/arp.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/nl80211.h>
#include <linux/spinlock.h>
#include "gl_os.h"
#include "debug.h"
#include "wlan_lib.h"
#include "gl_wext.h"
#include "gl_cfg80211.h"
#include "gl_kal.h"
#include "precomp.h"
#include "nic_cmd_event.h"
#include "wsys_cmd_handler_fw.h"
#include <net/oplus/oplus_wfd_wlan.h>
#include <linux/preempt.h>
#define BIT_SUPPORTED_PHY_NO_HT 1
#define BIT_SUPPORTED_PHY_HT 2
#define BIT_SUPPORTED_PHY_VHT 4
#define BIT_SUPPORTED_PHY_HE 8
#define MTK_SUPPORT_AVOID_CHANNEL 1
static int is_remove_He_ie_from_prebe_request = 0;
static struct wireless_dev *s_hdd_ctx = NULL;
static DEFINE_MUTEX(ctx_mutex);
static DECLARE_COMPLETION(s_avoid_channel_query_comp);
static char s_avoid_channel_query_comp_inited = 0;
static struct EVENT_LTE_SAFE_CHN s_event_lte_safe_chn;
extern uint8_t wlanGetChannelIndex(enum ENUM_BAND eBand, uint8_t channel);
#if CFG_SUPPORT_IDC_CH_SWITCH
extern struct EVENT_LTE_SAFE_CHN g_rLteSafeChInfo;
#endif
int oplus_wfd_get_remove_He_ie_flag(void);
void oplus_wfd_set_hdd_ctx(struct wireless_dev *hdd_ctx);
void oplus_register_oplus_wfd_wlan_ops_qcom(void);
static void remove_he_ie_from_probe_request(int remove);
static int get_dbs_capacity(void);
static int get_phy_capacity(int band);
static void get_avoid_channels(int *len, int* freqs, int max_num);
static struct oplus_wfd_wlan_ops_t oplus_wfd_wlan_ops_mtk = {
.remove_he_ie_from_probe_request = remove_he_ie_from_probe_request,
.get_dbs_capacity = get_dbs_capacity,
.get_phy_capacity = get_phy_capacity,
.get_supported_channels = NULL,
#if MTK_SUPPORT_AVOID_CHANNEL
.get_avoid_channels = get_avoid_channels,
#else
.get_avoid_channels = NULL,
#endif
};
static void remove_he_ie_from_probe_request(int remove) {
is_remove_He_ie_from_prebe_request = remove;
}
static struct wireless_dev * get_wdev_sta()
{
return s_hdd_ctx;
}
static struct ADAPTER * get_adapter() {
struct wireless_dev *wdev_sta = NULL;
struct GLUE_INFO *prGlueInfo = NULL;
struct ADAPTER *prAdapter = NULL;
struct wiphy *wiphy;
wdev_sta = get_wdev_sta();
if (wdev_sta == NULL) {
goto EXIT;
}
if (kalIsHalted()) {
goto EXIT;
}
wiphy = wdev_sta->wiphy;
if (wiphy == NULL) {
goto EXIT;
}
WIPHY_PRIV(wiphy, prGlueInfo);
if (prGlueInfo == NULL) {
goto EXIT;
}
if (!prGlueInfo->fgIsRegistered) {
goto EXIT;
}
prAdapter = prGlueInfo->prAdapter;
if (prAdapter == NULL) {
goto EXIT;
}
EXIT:
return prAdapter;
}
static int get_dbs_capacity(void)
{
#define DBS_UNKNOWN 0
#define DBS_NULL 1
#define DBS_SISO 2
#define DBS_MIMO 3
int cap = DBS_UNKNOWN;
struct ADAPTER *prAdapter = NULL;
mutex_lock(&ctx_mutex);
prAdapter = get_adapter();
if (prAdapter == NULL) {
goto EXIT;
}
cap = DBS_NULL;
EXIT:
mutex_unlock(&ctx_mutex);
return cap;
}
static int get_phy_capacity(int band)
{
int phy_bit = 0;
struct ADAPTER *prAdapter = NULL;
mutex_lock(&ctx_mutex);
prAdapter = get_adapter();
if (prAdapter == NULL) {
goto EXIT;
}
if (prAdapter->rWifiVar.ucStaHt) {
phy_bit |= BIT_SUPPORTED_PHY_HT;
}
if (prAdapter->rWifiVar.ucStaVht) {
phy_bit |= BIT_SUPPORTED_PHY_VHT | BIT_SUPPORTED_PHY_HT;
}
#if (CFG_SUPPORT_802_11AX == 1)
if (prAdapter->rWifiVar.ucStaHe) {
phy_bit |= BIT_SUPPORTED_PHY_HE | BIT_SUPPORTED_PHY_VHT | BIT_SUPPORTED_PHY_HT;
}
#endif
EXIT:
mutex_unlock(&ctx_mutex);
return phy_bit;
}
static int convertChannelToFrequency(int channel) {
if ((channel >= 0) && (channel < 14)) {
return 2412 + 5 * (channel - 1);
} else if (channel == 14) {
return 2484;
} else if ((channel >= 36) && (channel <= 181)) {
return 5180 + (channel - 36) * 5;
}
return -1;
}
#if MTK_SUPPORT_AVOID_CHANNEL
void oplus_nicOidCmdTimeoutCommon(IN struct ADAPTER *prAdapter,
IN struct CMD_INFO *prCmdInfo) {
}
void oplus_WfdCmdEventQueryLteSafeChn(IN struct ADAPTER *prAdapter,
IN struct CMD_INFO *prCmdInfo,
IN uint8_t *pucEventBuf)
{
struct EVENT_LTE_SAFE_CHN *prEvent;
int index;
do {
if ((prAdapter == NULL) || (prCmdInfo == NULL) || (pucEventBuf == NULL)
|| (prCmdInfo->pvInformationBuffer == NULL)) {
break;
}
prEvent = (struct EVENT_LTE_SAFE_CHN *) pucEventBuf;
if (prEvent->u4Flags & BIT(0)) {
memcpy(&s_event_lte_safe_chn, prEvent, sizeof(s_event_lte_safe_chn));
for (index = 0 ; index < ENUM_SAFE_CH_MASK_MAX_NUM; index++) {
}
} else {
}
} while (false);
complete(&s_avoid_channel_query_comp);
}
static uint32_t oplus_wlanQueryLteSafeChannel(IN struct ADAPTER *prAdapter,
IN uint8_t ucRoleIndex)
{
uint32_t rResult = WLAN_STATUS_FAILURE;
struct CMD_GET_LTE_SAFE_CHN rQuery_LTE_SAFE_CHN;
kalMemZero(&rQuery_LTE_SAFE_CHN, sizeof(rQuery_LTE_SAFE_CHN));
do {
if (!prAdapter)
break;
kalMemZero(&s_event_lte_safe_chn, sizeof(s_event_lte_safe_chn));
/* Get LTE safe channel list */
wlanSendSetQueryCmd(prAdapter,
CMD_ID_GET_LTE_CHN,
FALSE,
TRUE,
FALSE, /* Query ID */
oplus_WfdCmdEventQueryLteSafeChn, /* The handler to receive*/
oplus_nicOidCmdTimeoutCommon,
sizeof(struct CMD_GET_LTE_SAFE_CHN),
(uint8_t *)&rQuery_LTE_SAFE_CHN,
&s_event_lte_safe_chn,
0);
rResult = WLAN_STATUS_SUCCESS;
} while (0);
return rResult;
}
static void get_avoid_channels(int* out_len, int* out_freqs, int max_num)
{
struct ADAPTER *prAdapter = NULL;
uint32_t u4LteSafeChnBitMask_2G = 0, u4LteSafeChnBitMask_5G_1 = 0,
u4LteSafeChnBitMask_5G_2 = 0;
struct RF_CHANNEL_INFO aucChannelList[MAX_CHN_NUM];
uint8_t ucNumOfChannel;
int freq;
int len = 0;
int index = 0;
enum ENUM_BAND eBand;
*out_len = 0;
mutex_lock(&ctx_mutex);
prAdapter = get_adapter();
if (prAdapter == NULL) {
goto EXIT;
}
reinit_completion(&s_avoid_channel_query_comp);
if (WLAN_STATUS_SUCCESS == oplus_wlanQueryLteSafeChannel(prAdapter, 0)) {
pr_debug("get_avoid_channels begine to wait for fw");
if (!wait_for_completion_timeout(&s_avoid_channel_query_comp, msecs_to_jiffies(1000))) {
} else {
if (s_event_lte_safe_chn.u4Flags & BIT(0)) {
u4LteSafeChnBitMask_2G = s_event_lte_safe_chn
.rLteSafeChn.au4SafeChannelBitmask[0];
u4LteSafeChnBitMask_5G_1 = s_event_lte_safe_chn
.rLteSafeChn.au4SafeChannelBitmask[1];
u4LteSafeChnBitMask_5G_2 = s_event_lte_safe_chn
.rLteSafeChn.au4SafeChannelBitmask[2];
} else {
goto EXIT;
}
for (eBand = BAND_2G4; eBand <= BAND_5G; eBand++) {
kalMemZero(aucChannelList, sizeof(struct RF_CHANNEL_INFO) * MAX_CHN_NUM);
rlmDomainGetChnlList(prAdapter, eBand, TRUE, MAX_CHN_NUM,
&ucNumOfChannel, aucChannelList);
//goto safe channel loop
for (index = 0; index < ucNumOfChannel && len < max_num; index++) {
uint8_t ucIdx;
freq = -1;
ucIdx = wlanGetChannelIndex(eBand, aucChannelList[index].ucChannelNum);
if (ucIdx >= MAX_CHN_NUM) {
continue;
}
if (aucChannelList[index].ucChannelNum <= 14) {
if (!(u4LteSafeChnBitMask_2G & BIT(aucChannelList[index].ucChannelNum))) {
freq = convertChannelToFrequency(aucChannelList[index].ucChannelNum);
}
} else if ((aucChannelList[index].ucChannelNum >= 36) && (aucChannelList[index].ucChannelNum <= 144)) {
if (!(u4LteSafeChnBitMask_5G_1 & BIT((aucChannelList[index].ucChannelNum - 36) / 4))) {
freq = convertChannelToFrequency(aucChannelList[index].ucChannelNum);
}
} else if ((aucChannelList[index].ucChannelNum >= 149) && (aucChannelList[index].ucChannelNum <= 181)) {
if (!(u4LteSafeChnBitMask_5G_2 & BIT((aucChannelList[index].ucChannelNum - 149) / 4))) {
freq = convertChannelToFrequency(aucChannelList[index].ucChannelNum);
}
}
if (freq > 0 && len < max_num) {
out_freqs[len++] = freq;
}
}
}
}
} else {
}
EXIT:
*out_len = len;
mutex_unlock(&ctx_mutex);
}
#endif
/*************public begin********************/
int oplus_wfd_get_remove_He_ie_flag(void)
{
return is_remove_He_ie_from_prebe_request;
}
void oplus_wfd_set_hdd_ctx(struct wireless_dev *hdd_ctx)
{
mutex_lock(&ctx_mutex);
s_hdd_ctx = hdd_ctx;
mutex_unlock(&ctx_mutex);
}
void oplus_register_oplus_wfd_wlan_ops_mtk(void)
{
if (s_avoid_channel_query_comp_inited == 0) {
init_completion(&s_avoid_channel_query_comp);
s_avoid_channel_query_comp_inited = 1;
}
register_oplus_wfd_wlan_ops(&oplus_wfd_wlan_ops_mtk);
}
#endif

View File

@@ -96,11 +96,6 @@
#include "fw_log_wifi.h"
#endif
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
//XiaZijian@CONNECTIVITY.WIFI.BASIC.26106,20200703
void oplus_wfd_set_hdd_ctx(struct wireless_dev *hdd_ctx);
void oplus_register_oplus_wfd_wlan_ops_mtk(void);
#endif
/*******************************************************************************
* C O N S T A N T S
*******************************************************************************
@@ -3105,11 +3100,6 @@ static void wlanDestroyAllWdev(void)
#if CFG_ENABLE_WIFI_DIRECT
int i = 0;
#endif
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
//XiaZijian@CONNECTIVITY.WIFI.BASIC.26106,20200703
DBGLOG(INIT, INFO, "wlanDestroyAllWdev");
oplus_wfd_set_hdd_ctx(NULL);
#endif
WIPHY_PRIV(wlanGetWiphy(), prGlueInfo);
kalMemFree(prGlueInfo, VIR_MEM_TYPE, sizeof(struct GLUE_INFO));
@@ -3508,10 +3498,6 @@ void wlanNetDestroy(struct wireless_dev *prWdev)
struct GLUE_INFO *prGlueInfo = NULL;
ASSERT(prWdev);
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
//XiaZijian@CONNECTIVITY.WIFI.BASIC.26106,20200703
oplus_wfd_set_hdd_ctx(NULL);
#endif
if (!prWdev) {
DBGLOG(INIT, ERROR, "The device context is NULL\n");
return;
@@ -5942,13 +5928,6 @@ static int32_t wlanProbe(void *pvData, void *pvDriverData)
break;
}
}
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
//XiaZijian@CONNECTIVITY.WIFI.BASIC.26106,20200703
if (i4Status == 0) {
oplus_wfd_set_hdd_ctx(gprWdev[0]);
oplus_register_oplus_wfd_wlan_ops_mtk();
}
#endif
return i4Status;
} /* end of wlanProbe() */
@@ -6003,10 +5982,6 @@ static void wlanRemove(void)
uint8_t i;
#endif
DBGLOG(INIT, INFO, "Remove wlan!\n");
#ifdef OPLUS_FEATURE_WIFI_OPLUSWFD
//XiaZijian@CONNECTIVITY.WIFI.BASIC.26106,20200703
oplus_wfd_set_hdd_ctx(NULL);
#endif
kalSetHalted(TRUE);
/*reset NVRAM State to ready for the next wifi-no*/

View File

@@ -26,10 +26,6 @@
#include "modem_sys.h"
#include "md_sys1_platform.h"
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
//MaiWentian@NETWORK.RF.1448074, 2018/06/26,Add for monitor modem crash
#include <soc/oppo/mmkey_log.h>
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
#ifndef DB_OPT_DEFAULT
#define DB_OPT_DEFAULT (0) /* Dummy macro define to avoid build error */
@@ -65,14 +61,6 @@ static void ccci_aed_v3(struct ccci_fsm_ee *mdee, unsigned int dump_flag,
int md_dbg_dump_flag = per_md_data->md_dbg_dump_flag;
#endif
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
//MaiWentian@NETWORK.RF.1448074, 2018/06/26,Add for monitor modem crash
int temp_i;
int checkID = 0;
unsigned int hashId = 0;
char *logBuf;
char *aed_str_for_hash = NULL;
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
int ret = 0;
@@ -101,52 +89,6 @@ static void ccci_aed_v3(struct ccci_fsm_ee *mdee, unsigned int dump_flag,
CCCI_ERROR_LOG(md_id, FSM, "%s-%d:snprintf fail,ret = %d\n",
__func__, __LINE__, ret);
memset(mdee->ex_start_time, 0x0, sizeof(mdee->ex_start_time));
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
//MaiWentian@NETWORK.RF.1448074, 2018/06/26,Add for monitor modem crash
#define MCU_CORE_MSG "(MCU_core"
aed_str_for_hash = aed_str;
if( aed_str_for_hash != NULL ) {
if( (strncmp(aed_str_for_hash, MCU_CORE_MSG, strlen(MCU_CORE_MSG)) == 0) ) {
while(aed_str_for_hash[0] != '\n') {
++aed_str_for_hash;
}
++aed_str_for_hash; //skip '\n'
}
hashId = BKDRHash(aed_str_for_hash, strlen(aed_str_for_hash));
}
else {
CCCI_ERROR_LOG(md_id, FSM, "aed_str_for_hash is null!!");
}
logBuf = vmalloc(BUF_LOG_LENGTH);
if ((logBuf != NULL)&&(aed_str_for_hash != NULL)) {
for (temp_i = 0 ; (temp_i < BUF_LOG_LENGTH) && (temp_i < strlen(aed_str_for_hash)) ; temp_i++) {
//chenyihuai@NETWORK.EM, 2019/12/12, Modify for some othe type dump,start
/*
if(aed_str_for_hash[temp_i] == '\n') {
logBuf[temp_i] = '\0';
break;
}
logBuf[temp_i] = aed_str_for_hash[temp_i];
*/
if(aed_str_for_hash[temp_i] == '\n') {
checkID++;
CCCI_ERROR_LOG(md_id, FSM, "checkID = %d",checkID);
if(2 == checkID) {
logBuf[temp_i] = '\0';
break;
}
logBuf[temp_i] = ' ';
}else {
logBuf[temp_i] = aed_str_for_hash[temp_i];
}
//end
}
logBuf[BUF_LOG_LENGTH - 1] = '\0';
CCCI_NORMAL_LOG(md_id, FSM, "modem crash wirte to critical log. hashid = %u, cause = %s.", hashId, logBuf);
mm_keylog_write_modemdump(hashId, logBuf, MODEM_MONITOR_ID, "modem");
vfree(logBuf);
}
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
/* MD ID must sync with aee_dump_ccci_debug_info() */
err_exit1:
if (dump_flag & CCCI_AED_DUMP_CCIF_REG) {

View File

@@ -230,11 +230,5 @@ struct mdee_dumper_v3 {
/* request by modem, change to 2k: include struct ex_PL_log*/
unsigned char ex_pl_info[MD_HS1_FAIL_DUMP_SIZE];
};
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
//MaiWentian@NETWORK.RF.1448074, 2018/06/26,Add for monitor modem crash
#define MODEM_MONITOR_ID 509 //modem crash
#define BUF_LOG_LENGTH 2148
unsigned int BKDRHash(const char* str, unsigned int len);
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
#endif /* __MDEE_DUMPER_V3_H__ */

View File

@@ -22,9 +22,6 @@
#include "ccci_config.h"
#include "ccci_fsm_sys.h"
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
#include <soc/oppo/mmkey_log.h>
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
#ifndef DB_OPT_DEFAULT
#define DB_OPT_DEFAULT (0) /* Dummy macro define to avoid build error */
@@ -60,13 +57,6 @@ static void ccci_aed_v5(struct ccci_fsm_ee *mdee, unsigned int dump_flag,
int md_dbg_dump_flag = per_md_data->md_dbg_dump_flag;
#endif
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
int temp_i;
int checkID = 0;
unsigned int hashId = 0;
char *logBuf;
char *aed_str_for_hash = NULL;
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
int ret = 0;
@@ -97,43 +87,6 @@ static void ccci_aed_v5(struct ccci_fsm_ee *mdee, unsigned int dump_flag,
goto err_exit1;
}
memset(mdee->ex_start_time, 0x0, sizeof(mdee->ex_start_time));
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
#define MCU_CORE_MSG "(MCU_core"
aed_str_for_hash = aed_str;
if( aed_str_for_hash != NULL ) {
if( (strncmp(aed_str_for_hash, MCU_CORE_MSG, strlen(MCU_CORE_MSG)) == 0) ) {
while(aed_str_for_hash[0] != '\n') {
++aed_str_for_hash;
}
++aed_str_for_hash; //skip '\n'
}
hashId = BKDRHash(aed_str_for_hash, strlen(aed_str_for_hash));
}
else {
CCCI_ERROR_LOG(md_id, FSM, "aed_str_for_hash is null!!");
}
logBuf = vmalloc(BUF_LOG_LENGTH);
if ((logBuf != NULL)&&(aed_str_for_hash != NULL)) {
for (temp_i = 0 ; (temp_i < BUF_LOG_LENGTH) && (temp_i < strlen(aed_str_for_hash)) ; temp_i++) {
if(aed_str_for_hash[temp_i] == '\n') {
checkID++;
CCCI_ERROR_LOG(md_id, FSM, "checkID = %d",checkID);
if(2 == checkID) {
logBuf[temp_i] = '\0';
break;
}
logBuf[temp_i] = ' ';
}else {
logBuf[temp_i] = aed_str_for_hash[temp_i];
}
//end
}
logBuf[BUF_LOG_LENGTH - 1] = '\0';
CCCI_NORMAL_LOG(md_id, FSM, "modem crash wirte to critical log. hashid = %u, cause = %s.", hashId, logBuf);
mm_keylog_write_modemdump(hashId, logBuf, MODEM_MONITOR_ID, "modem");
vfree(logBuf);
}
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
/* MD ID must sync with aee_dump_ccci_debug_info() */
err_exit1:
if (dump_flag & CCCI_AED_DUMP_CCIF_REG) {
@@ -1003,21 +956,3 @@ int mdee_dumper_v5_alloc(struct ccci_fsm_ee *mdee)
return 0;
}
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
/*
 * BKDRHash - classic multiplicative string hash (hash = hash * 131 + byte).
 * @str: bytes to hash; NULL hashes to 0
 * @len: number of bytes of @str to consume (not NUL-driven)
 *
 * Unsigned arithmetic wraps on overflow, which is intended for a hash.
 * NOTE(review): bytes are added as plain char, so values > 127 sign-extend
 * on signed-char platforms; callers only hash ASCII crash strings here.
 */
unsigned int BKDRHash(const char* str, unsigned int len)
{
	unsigned int seed = 131; /* 31 131 1313 13131 131313 etc.. */
	unsigned int hash = 0;
	/* unsigned index: avoids signed/unsigned comparison against len */
	unsigned int i = 0;

	if (str == NULL) {
		return 0;
	}
	for (i = 0; i < len; str++, i++) {
		hash = (hash * seed) + (*str);
	}
	return hash;
}
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/

View File

@@ -239,10 +239,4 @@ struct mdee_dumper_v5 {
/* request by modem, change to 2k: include struct ex_PL_log*/
unsigned char ex_pl_info[MD_HS1_FAIL_DUMP_SIZE];
};
//#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
#define MODEM_MONITOR_ID 509 //modem crash
#define BUF_LOG_LENGTH 2148
unsigned int BKDRHash(const char* str, unsigned int len);
//#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
#endif /* __MDEE_DUMPER_V5_H__ */

View File

@@ -166,24 +166,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -166,24 +166,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -166,25 +166,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
/* NOTE(review): stray "f;kf" garbage token removed here - it broke the build. */
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
/*
 * get_gl_mem_by_pid - GPU (GL) memory attributed to @pid, in KB.
 * Returns 0 when the pid is not present or MTK meminfo is disabled.
 */
int get_gl_mem_by_pid(pid_t pid)
{
	ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
	int i = 0;
	/* table is terminated by the first entry with pid == 0 */
	for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
		if (g_mtk_gpu_meminfo[i].pid == pid) { /* NOTE(review): read without a lock - confirm */
			return P2K(g_mtk_gpu_meminfo[i].used_pages);
		}
	}
#endif /* ENABLE_MTK_MEMINFO */
	return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -166,24 +166,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -166,25 +166,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -166,24 +166,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -166,24 +166,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -193,24 +193,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return 0;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -195,24 +195,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return 0;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -170,24 +170,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -170,24 +170,6 @@ static int proc_gpu_memoryusage_show(struct seq_file *m, void *v)
return ret;
}
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
static int kbasep_gpu_memoryusage_debugfs_open(struct inode *in, struct file *file)
{
return single_open(file, proc_gpu_memoryusage_show, NULL);

View File

@@ -142,24 +142,6 @@ static int mtk_common_gpu_memory_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(mtk_common_gpu_utilization);
DEFINE_SHOW_ATTRIBUTE(mtk_common_gpu_memory);
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
void mtk_common_procfs_init(void)
{
mtk_mali_root = proc_mkdir("mtk_mali", NULL);

View File

@@ -141,24 +141,6 @@ static int mtk_common_gpu_memory_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(mtk_common_gpu_memory);
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
void mtk_common_procfs_init(void)
{
mtk_mali_root = proc_mkdir("mtk_mali", NULL);

View File

@@ -141,42 +141,6 @@ static int mtk_common_gpu_memory_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(mtk_common_gpu_memory);
#ifdef OPLUS_BUG_STABILITY
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
int get_gl_mem_by_pid(pid_t pid)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gl_mem_by_pid);
#endif
#if defined(OPLUS_FEATURE_MEMLEAK_DETECT) && defined(CONFIG_DUMP_TASKS_MEM)
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
unsigned long get_gpumem_by_pid(pid_t pid, int mem_type)
{
ssize_t ret = 0;
#ifdef ENABLE_MTK_MEMINFO
int i = 0;
for (i = 0; (i < MTK_MEMINFO_SIZE) && (g_mtk_gpu_meminfo[i].pid != 0); i++) {
if(g_mtk_gpu_meminfo[i].pid == pid) { //no lock protecte?
return P2K(g_mtk_gpu_meminfo[i].used_pages);
}
}
#endif /* ENABLE_MTK_MEMINFO */
return ret;
}
EXPORT_SYMBOL(get_gpumem_by_pid);
#endif
void mtk_common_procfs_init(void)
{
mtk_mali_root = proc_mkdir("mtk_mali", NULL);

View File

@@ -54,10 +54,6 @@
#define NQ_TEE_WORKER_THREADS 1
#endif
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
extern int phx_is_system_boot_completed(void);
//#endif /* OPLUS_FEATURE_SECURITY_COMMON */
static struct {
struct mutex buffer_mutex; /* Lock on SWd communication buffer */
struct mcp_buffer *mcp_buffer;
@@ -517,10 +513,6 @@ static void nq_dump_status(void)
size_t i;
cpumask_t old_affinity;
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
int boot_completed_tee = 0;
//#endif /* OPLUS_FEATURE_SECURITY_COMMON */
if (l_ctx.dump.off)
ret = -EBUSY;
@@ -566,16 +558,7 @@ static void nq_dump_status(void)
tee_restore_affinity(old_affinity);
mc_dev_info(" %-22s= 0x%s", "mcExcep.uuid", uuid_str);
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
if(0 == strcmp(uuid_str, "07170000000000000000000000000000")) {
boot_completed_tee = phx_is_system_boot_completed();
if(boot_completed_tee == 1) {
mc_dev_info("tee boot complete\n");
} else {
BUG();
}
}
//#endif /* OPLUS_FEATURE_SECURITY_COMMON */
if (ret >= 0)
ret = kasnprintf(&l_ctx.dump, "%-22s= 0x%s\n", "mcExcep.uuid",
uuid_str);

View File

@@ -60,9 +60,6 @@ extern void oppo_rtc_mark_edl(void);
extern void oppo_rtc_mark_sensor_cause_panic(void);
extern int oppo_get_rtc_sensor_cause_panic_value(void);
extern void oppo_clear_rtc_sensor_cause_panic(void);
#ifdef OPLUS_FEATURE_AGINGTEST
extern void oppo_rtc_mark_agingtest(void);
#endif /*OPLUS_FEATURE_AGINGTEST */
#endif /* OPLUS_BUG_STABILITY */
extern u16 rtc_rdwr_uart_bits(u16 *val);
extern void rtc_bbpu_power_down(void);
@@ -105,9 +102,6 @@ extern bool crystal_exist_status(void);
#define oppo_rtc_mark_sensor_cause_panic() do {} while (0)
#define oppo_get_rtc_sensor_cause_panic_value() do {} while (0)
#define oppo_clear_rtc_sensor_cause_panic() do {} while (0)
#ifdef OPLUS_FEATURE_AGINGTEST
#define oppo_rtc_mark_agingtest() do {} while (0)
#endif /*OPLUS_FEATURE_AGINGTEST */
#endif /* OPLUS_BUG_STABILITY */
#define rtc_read_pwron_alarm(alm) ({ 0; })
#define get_rtc_spare_fg_value() ({ 0; })

View File

@@ -52,9 +52,6 @@ enum rtc_spare_enum {
RTC_SENSOR_CAUSE_PANIC,
#endif /* OPLUS_BUG_STABILITY */
RTC_SAFE_BOOT,
#ifdef OPLUS_FEATURE_AGINGTEST
RTC_AGINGTEST_BOOT,
#endif /*OPLUS_FEATURE_AGINGTEST */
RTC_SPAR_NUM
};

View File

@@ -35,9 +35,6 @@
#include <linux/poll.h>
#include <linux/init.h>
#if defined(OPLUS_FEATURE_MULTI_FREEAREA) && defined(CONFIG_PHYSICAL_ANTI_FRAGMENTATION)
#include <linux/mmzone.h>
#endif
#ifdef CONFIG_MTK_GPU_SUPPORT
#include <mt-plat/mtk_gpu_utility.h>
@@ -408,35 +405,16 @@ static void mlog_buddyinfo(void)
for_each_populated_zone(zone) {
unsigned long flags;
unsigned int order;
#if defined(OPLUS_FEATURE_MULTI_FREEAREA) && defined(CONFIG_PHYSICAL_ANTI_FRAGMENTATION)
unsigned long nr[FREE_AREA_COUNTS][MAX_ORDER] = {0};
unsigned int flc;
#else
unsigned long nr[MAX_ORDER] = {0};
#endif
spin_lock_irqsave(&zone->lock, flags);
#if defined(OPLUS_FEATURE_MULTI_FREEAREA) && defined(CONFIG_PHYSICAL_ANTI_FRAGMENTATION)
for (flc = 0; flc < FREE_AREA_COUNTS; ++flc) {
for (order = 0; order < MAX_ORDER; ++order)
nr[flc][order] = zone->free_area[flc][order].nr_free;
}
#else
for (order = 0; order < MAX_ORDER; ++order)
nr[order] = zone->free_area[order].nr_free;
#endif
spin_unlock_irqrestore(&zone->lock, flags);
/* emit logs */
spin_lock_bh(&mlogbuf_lock);
#if defined(OPLUS_FEATURE_MULTI_FREEAREA) && defined(CONFIG_PHYSICAL_ANTI_FRAGMENTATION)
for (flc = 0; flc < FREE_AREA_COUNTS; ++flc) {
for (order = 0; order < MAX_ORDER; ++order)
mlog_emit(nr[flc][order]);
}
#else
for (order = 0; order < MAX_ORDER; ++order)
mlog_emit(nr[order]);
#endif
spin_unlock_bh(&mlogbuf_lock);
}
}

View File

@@ -23,14 +23,8 @@ extern unsigned int mt_ppm_userlimit_freq_limit_by_others(
unsigned int cluster);
extern unsigned long get_cpu_orig_capacity(unsigned int cpu);
extern int upower_get_turn_point(void);
#if defined(OPLUS_FEATURE_SCHEDUTIL_USE_TL) && defined(CONFIG_SCHEDUTIL_USE_TL)
extern void set_capacity_margin_dvfs(unsigned int margin);
extern void set_capacity_margin_dvfs_changed(bool changed);
extern unsigned int get_capacity_margin_dvfs(void);
#else
extern void set_capacity_margin(unsigned int margin);
extern unsigned int get_capacity_margin(void);
#endif
extern void set_user_nice(struct task_struct *p, long nice);
extern int fpsgo_fbt2minitop_start(int count, struct fpsgo_loading *fl);

View File

@@ -687,20 +687,10 @@ static void fbt_set_cap_margin_locked(int set)
fpsgo_systrace_c_fbt_gm(-100, 0, set?1024:def_capacity_margin,
"cap_margin");
#if defined(OPLUS_FEATURE_SCHEDUTIL_USE_TL) && defined(CONFIG_SCHEDUTIL_USE_TL)
if (set)
set_capacity_margin_dvfs(1024);
else
set_capacity_margin_dvfs(def_capacity_margin);
#if defined(CONFIG_SCHEDUTIL_USE_TL)
set_capacity_margin_dvfs_changed(!!set);
#endif /* CONFIG_SCHEDUTIL_USE_TL */
#else
if (set)
set_capacity_margin(1024);
else
set_capacity_margin(def_capacity_margin);
#endif /* OPLUS_FEATURE_SCHEDUTIL_USE_TL */
set_cap_margin = set;
}
@@ -4629,15 +4619,9 @@ static ssize_t enable_switch_cap_margin_show(struct kobject *kobj,
FPSGO_SYSFS_MAX_BUFF_SIZE - posi,
"set_cap_margin %d\n", set_cap_margin);
posi += length;
#if defined(OPLUS_FEATURE_SCHEDUTIL_USE_TL) && defined(CONFIG_SCHEDUTIL_USE_TL)
length = scnprintf(temp + posi,
FPSGO_SYSFS_MAX_BUFF_SIZE - posi,
"get_cap_margin %d\n", get_capacity_margin_dvfs());
#else
length = scnprintf(temp + posi,
FPSGO_SYSFS_MAX_BUFF_SIZE - posi,
"get_cap_margin %d\n", get_capacity_margin());
#endif
posi += length;
mutex_unlock(&fbt_mlock);
@@ -5180,11 +5164,7 @@ int __init fbt_cpu_init(void)
fbt_down_throttle_enable = 1;
sync_flag = -1;
fbt_sync_flag_enable = 1;
#if defined(OPLUS_FEATURE_SCHEDUTIL_USE_TL) && defined(CONFIG_SCHEDUTIL_USE_TL)
def_capacity_margin = get_capacity_margin_dvfs();
#else
def_capacity_margin = get_capacity_margin();
#endif
fbt_cap_margin_enable = 1;
boost_ta = fbt_get_default_boost_ta();
adjust_loading = fbt_get_default_adj_loading();

View File

@@ -121,9 +121,6 @@ u16 rtc_spare_reg[RTC_SPAR_NUM][3] = {
{RTC_SPAR0, 0x1, 12},
{RTC_SPAR0, 0x1, 13},
#endif
#ifdef OPLUS_FEATURE_AGINGTEST
{RTC_SPAR0, 0x01, 14},
#endif /*OPLUS_FEATURE_AGINGTEST */
#ifdef CONFIG_OPLUS_CHARGER_MTK6771
{RTC_SPAR0, 0x01, 15},
#endif /* OPLUS_BUG_STABILITY */

View File

@@ -468,17 +468,6 @@ void oppo_rtc_mark_factory(void)
spin_unlock_irqrestore(&rtc_lock, flags);
}
#ifdef OPLUS_FEATURE_AGINGTEST
void oppo_rtc_mark_agingtest(void)
{
unsigned long flags;
rtc_xinfo("rtc_mark_agingtest\n");
spin_lock_irqsave(&rtc_lock, flags);
hal_rtc_set_spare_register(RTC_AGINGTEST_BOOT, 0x01);
spin_unlock_irqrestore(&rtc_lock, flags);
}
#endif /*OPLUS_FEATURE_AGINGTEST */
void oppo_rtc_mark_safe(void)
{

View File

@@ -34,9 +34,6 @@
#include "mtk_devinfo.h"
#endif
#ifdef OPLUS_FEATURE_SCHED_ASSIST
#include <linux/sched_assist/sched_assist_common.h>
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
#define SCHED_HINT_THROTTLE_NSEC 10000000 /* 10ms for throttle */
@@ -408,8 +405,7 @@ err:
late_initcall(sched_hint_init);
#ifdef CONFIG_MTK_SCHED_BOOST
//OPLUS_FEATURE_SCHED_ASSIST remove static
/*static*/ int sched_boost_type = SCHED_NO_BOOST;
static int sched_boost_type = SCHED_NO_BOOST;
inline int valid_cpu_prefer(int task_prefer)
{
@@ -616,9 +612,6 @@ void __init init_efuse_info(void)
efuse_aware_big_thermal = (get_devinfo_with_index(7) & 0xFF) == 0x30;
}
#endif
#ifdef CONFIG_MTK_SCHED_BOOST
extern oplus_task_sched_boost(struct task_struct *p, int *task_prefer);
#endif
int select_task_prefer_cpu(struct task_struct *p, int new_cpu)
{
int task_prefer;
@@ -633,15 +626,6 @@ int select_task_prefer_cpu(struct task_struct *p, int new_cpu)
#endif
task_prefer = cpu_prefer(p);
#ifdef OPLUS_FEATURE_SCHED_ASSIST
if(task_prefer == SCHED_PREFER_LITTLE && (test_task_ux(p) || is_sf(p)) && sysctl_sched_assist_enabled && (sched_assist_scene(SA_SLIDE)|| sched_assist_scene(SA_INPUT) || sched_assist_scene(SA_LAUNCHER_SI) || sched_assist_scene(SA_ANIM))){
task_prefer = SCHED_PREFER_NONE;
p->cpu_prefer = SCHED_PREFER_NONE;
}
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
#ifdef CONFIG_MTK_SCHED_BOOST //OPLUS_FEATURE_SCHED_ASSIST
oplus_task_sched_boost(p, &task_prefer);
#endif
if (!hinted_cpu_prefer(task_prefer))
goto out;
@@ -651,17 +635,8 @@ int select_task_prefer_cpu(struct task_struct *p, int new_cpu)
}
for (i = 0; i < domain_cnt; i++) {
#ifdef OPLUS_FEATURE_SCHED_ASSIST
if (task_prefer == SCHED_PREFER_BIG)
iter_domain = domain_cnt - i - 1;
else if (task_prefer == SCHED_PREFER_MEDIUM)
iter_domain = (i < domain_cnt -1) ? i + 1 : 0;
else
iter_domain = i;
#else
iter_domain = (task_prefer == SCHED_PREFER_BIG) ?
domain_cnt-i-1 : i;
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
domain = tmp_domain[iter_domain];
@@ -789,9 +764,7 @@ int set_sched_boost(unsigned int val)
if (val == SCHED_ALL_BOOST)
sched_scheduler_switch(SCHED_HMP_LB);
else if (val == SCHED_FG_BOOST) {
//OPLUS_FEATURE_SCHED_ASSIST
//In MTK platform,we use oplus_task_sched_boost
//sched_set_boost_fg();
sched_set_boost_fg();
}
}
printk_deferred("[name:sched_boost&] sched boost: set %d\n",
@@ -949,13 +922,8 @@ int sched_walt_enable(int user, int en)
}
#ifdef CONFIG_SCHED_WALT
#ifdef OPLUS_FEATURE_SCHED_ASSIST
sysctl_sched_use_walt_cpu_util = 0;
sysctl_sched_use_walt_task_util = 0;
#else /* OPLUS_FEATURE_SCHED_ASSIST */
sysctl_sched_use_walt_cpu_util = walted;
sysctl_sched_use_walt_task_util = walted;
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
trace_sched_ctl_walt(user_mask, walted);
#endif

View File

@@ -3,8 +3,3 @@ config OPLUS_SENSOR_MTK68XX
default n
help
It support 68xx.
config OPLUS_SENSOR_FB_MTK
tristate "sensor feedback config for qcom"
help
To compile this driver as a module, set M in kernel build config
module will be called sensor_feedback.

View File

@@ -15,4 +15,3 @@ subdir-ccflags-y += -I$(srctree)/drivers/misc/mediatek/scp/$(CONFIG_MTK_PLATFORM
subdir-ccflags-y += -D CONFIG_OPLUS_SENSOR_MTK68XX
endif
obj-$(CONFIG_NANOHUB) += sensor_devinfo.o
obj-$(CONFIG_OPLUS_SENSOR_FB_MTK) += oplus_sensor_feedback/

View File

@@ -1,2 +0,0 @@
oplus_bsp_sensor_feedback-y := sensor_feedback.o
obj-$(CONFIG_OPLUS_SENSOR_FB_MTK) += oplus_bsp_sensor_feedback.o

View File

@@ -1,607 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018-2020 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "<sensor_feedback>" fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/soc/qcom/smem.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <linux/kdev_t.h>
#include <linux/vmalloc.h>
#include "scp_helper.h"
#include "sensor_feedback.h"
#ifdef CONFIG_OPLUS_FEATURE_FEEDBACK
#include <soc/oplus/system/kernel_fb.h>
#endif
#define SENSOR_DEVICE_TYPE "10002"
#define SENSOR_POWER_TYPE "10003"
#define SENSOR_STABILITY_TYPE "10004"
#define SENSOR_PFMC_TYPE "10005"
#define SENSOR_MEMORY_TYPE "10006"
#define SENSOR_DEBUG_DEVICE_TYPE "20002"
#define SENSOR_DEBUG_POWER_TYPE "20003"
#define SENSOR_DEBUG_STABILITY_TYPE "20004"
#define SENSOR_DEBUG_PFMC_TYPE "20005"
#define SENSOR_DEBUG_MEMORY_TYPE "20006"
static struct sensor_fb_cxt *g_sensor_fb_cxt = NULL;
/*fb_field :maxlen 19*/
struct sensor_fb_conf g_fb_conf[] = {
{PS_INIT_FAIL_ID, "device_ps_init_fail", SENSOR_DEVICE_TYPE},
{PS_I2C_ERR_ID, "device_ps_i2c_err", SENSOR_DEVICE_TYPE},
{PS_ALLOC_FAIL_ID, "device_ps_alloc_fail", SENSOR_DEVICE_TYPE},
{PS_ESD_REST_ID, "device_ps_esd_reset", SENSOR_DEVICE_TYPE},
{PS_NO_INTERRUPT_ID, "device_ps_no_irq", SENSOR_DEVICE_TYPE},
{PS_FIRST_REPORT_DELAY_COUNT_ID, "device_ps_rpt_delay", SENSOR_DEBUG_DEVICE_TYPE},
{PS_ORIGIN_DATA_TO_ZERO_ID, "device_ps_to_zero", SENSOR_DEBUG_DEVICE_TYPE},
{PS_CALI_DATA_ID, "device_ps_cali_data", SENSOR_DEBUG_DEVICE_TYPE},
{ALS_INIT_FAIL_ID, "device_als_init_fail", SENSOR_DEVICE_TYPE},
{ALS_I2C_ERR_ID, "device_als_i2c_err", SENSOR_DEVICE_TYPE},
{ALS_ALLOC_FAIL_ID, "device_als_alloc_fail", SENSOR_DEVICE_TYPE},
{ALS_ESD_REST_ID, "device_als_esd_reset", SENSOR_DEVICE_TYPE},
{ALS_NO_INTERRUPT_ID, "device_als_no_irq", SENSOR_DEVICE_TYPE},
{ALS_FIRST_REPORT_DELAY_COUNT_ID, "device_als_rpt_delay", SENSOR_DEBUG_DEVICE_TYPE},
{ALS_ORIGIN_DATA_TO_ZERO_ID, "device_als_to_zero", SENSOR_DEBUG_DEVICE_TYPE},
{ALS_CALI_DATA_ID, "device_als_cali_data", SENSOR_DEBUG_DEVICE_TYPE},
{ACCEL_INIT_FAIL_ID, "device_acc_init_fail", SENSOR_DEVICE_TYPE},
{ACCEL_I2C_ERR_ID, "device_acc_i2c_err", SENSOR_DEVICE_TYPE},
{ACCEL_ALLOC_FAIL_ID, "device_acc_alloc_fail", SENSOR_DEVICE_TYPE},
{ACCEL_ESD_REST_ID, "device_acc_esd_reset", SENSOR_DEVICE_TYPE},
{ACCEL_NO_INTERRUPT_ID, "device_acc_no_irq", SENSOR_DEVICE_TYPE},
{ACCEL_FIRST_REPORT_DELAY_COUNT_ID, "device_acc_rpt_delay", SENSOR_DEBUG_DEVICE_TYPE},
{ACCEL_ORIGIN_DATA_TO_ZERO_ID, "device_acc_to_zero", SENSOR_DEBUG_DEVICE_TYPE},
{ACCEL_CALI_DATA_ID, "device_acc_cali_data", SENSOR_DEBUG_DEVICE_TYPE},
{GYRO_INIT_FAIL_ID, "device_gyro_init_fail", SENSOR_DEVICE_TYPE},
{GYRO_I2C_ERR_ID, "device_gyro_i2c_err", SENSOR_DEVICE_TYPE},
{GYRO_ALLOC_FAIL_ID, "device_gyro_alloc_fail", SENSOR_DEVICE_TYPE},
{GYRO_ESD_REST_ID, "device_gyro_esd_reset", SENSOR_DEVICE_TYPE},
{GYRO_NO_INTERRUPT_ID, "device_gyro_no_irq", SENSOR_DEVICE_TYPE},
{GYRO_FIRST_REPORT_DELAY_COUNT_ID, "device_gyro_rpt_delay", SENSOR_DEBUG_DEVICE_TYPE},
{GYRO_ORIGIN_DATA_TO_ZERO_ID, "device_gyro_to_zero", SENSOR_DEBUG_DEVICE_TYPE},
{GYRO_CALI_DATA_ID, "device_gyro_cali_data", SENSOR_DEBUG_DEVICE_TYPE},
{MAG_INIT_FAIL_ID, "device_mag_init_fail", SENSOR_DEVICE_TYPE},
{MAG_I2C_ERR_ID, "device_mag_i2c_err", SENSOR_DEVICE_TYPE},
{MAG_ALLOC_FAIL_ID, "device_mag_alloc_fail", SENSOR_DEVICE_TYPE},
{MAG_ESD_REST_ID, "device_mag_esd_reset", SENSOR_DEVICE_TYPE},
{MAG_NO_INTERRUPT_ID, "device_mag_no_irq", SENSOR_DEVICE_TYPE},
{MAG_FIRST_REPORT_DELAY_COUNT_ID, "device_mag_rpt_delay", SENSOR_DEBUG_DEVICE_TYPE},
{MAG_ORIGIN_DATA_TO_ZERO_ID, "device_mag_to_zero", SENSOR_DEBUG_DEVICE_TYPE},
{MAG_CALI_DATA_ID, "device_mag_cali_data", SENSOR_DEBUG_DEVICE_TYPE},
{SAR_INIT_FAIL_ID, "device_sar_init_fail", SENSOR_DEVICE_TYPE},
{SAR_I2C_ERR_ID, "device_sar_i2c_err", SENSOR_DEVICE_TYPE},
{SAR_ALLOC_FAIL_ID, "device_sar_alloc_fail", SENSOR_DEVICE_TYPE},
{SAR_ESD_REST_ID, "device_sar_esd_reset", SENSOR_DEVICE_TYPE},
{SAR_NO_INTERRUPT_ID, "device_sar_no_irq", SENSOR_DEVICE_TYPE},
{SAR_FIRST_REPORT_DELAY_COUNT_ID, "device_sar_rpt_delay", SENSOR_DEBUG_DEVICE_TYPE},
{SAR_ORIGIN_DATA_TO_ZERO_ID, "device_sar_to_zero", SENSOR_DEBUG_DEVICE_TYPE},
{SAR_CALI_DATA_ID, "device_sar_cali_data", SENSOR_DEBUG_DEVICE_TYPE},
{POWER_SENSOR_INFO_ID, "debug_power_sns_info", SENSOR_DEBUG_POWER_TYPE},
{POWER_ACCEL_INFO_ID, "debug_power_acc_info", SENSOR_DEBUG_POWER_TYPE},
{POWER_GYRO_INFO_ID, "debug_power_gyro_info", SENSOR_DEBUG_POWER_TYPE},
{POWER_MAG_INFO_ID, "debug_power_mag_info", SENSOR_DEBUG_POWER_TYPE},
{POWER_PROXIMITY_INFO_ID, "debug_power_prox_info", SENSOR_DEBUG_POWER_TYPE},
{POWER_LIGHT_INFO_ID, "debug_power_light_info", SENSOR_DEBUG_POWER_TYPE},
{POWER_WISE_LIGHT_INFO_ID, "debug_power_wiseligt_info", SENSOR_DEBUG_POWER_TYPE},
{POWER_WAKE_UP_RATE_ID, "debug_power_wakeup_rate", SENSOR_DEBUG_POWER_TYPE},
{POWER_ADSP_SLEEP_RATIO_ID, "power_adsp_sleep_ratio", SENSOR_POWER_TYPE},
{DOUBLE_TAP_REPORTED_ID, "device_double_tap_reported", SENSOR_DEBUG_DEVICE_TYPE},
{DOUBLE_TAP_PREVENTED_BY_NEAR_ID, "device_double_tap_prevented_by_near", SENSOR_DEBUG_DEVICE_TYPE},
{DOUBLE_TAP_PREVENTED_BY_ATTITUDE_ID, "device_double_prevented_by_attitude", SENSOR_DEBUG_DEVICE_TYPE},
{DOUBLE_TAP_PREVENTED_BY_FREEFALL_Z_ID, "device_double_prevented_by_freefall_z", SENSOR_DEBUG_DEVICE_TYPE},
{DOUBLE_TAP_PREVENTED_BY_FREEFALL_SLOPE_ID, "device_double_prevented_by_freefall_slope", SENSOR_DEBUG_DEVICE_TYPE},
{ALAILABLE_SENSOR_LIST_ID, "available_sensor_list", SENSOR_DEBUG_DEVICE_TYPE},
{HAL_SENSOR_NOT_FOUND, "device_hal_not_found", SENSOR_DEVICE_TYPE},
{HAL_QMI_ERROR, "device_hal_qmi_error", SENSOR_DEVICE_TYPE},
{HAL_SENSOR_TIMESTAMP_ERROR, "device_hal_ts_error", SENSOR_DEBUG_DEVICE_TYPE}
};
/*
 * Look up @event_id in the g_fb_conf table.
 *
 * Returns the table index on success, or -1 if the id is unknown.
 */
static int find_event_id(int16_t event_id)
{
	int len = sizeof(g_fb_conf) / sizeof(g_fb_conf[0]);
	int index = 0;

	for (index = 0; index < len; index++) {
		if (g_fb_conf[index].event_id == event_id) {
			/* ids in the table are distinct entries; stop at the
			 * first hit instead of scanning the whole table */
			return index;
		}
	}
	return -1;
}
/* sysfs show: report the number of ADSP feedback events currently queued. */
static ssize_t adsp_notify_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct sensor_fb_cxt *fb_cxt = g_sensor_fb_cxt;
	uint16_t pending;

	/* snapshot under the lock shared with the store path */
	spin_lock(&fb_cxt->rw_lock);
	pending = fb_cxt->adsp_event_counts;
	spin_unlock(&fb_cxt->rw_lock);

	pr_info("adsp_value = %d\n", pending);
	return snprintf(buf, PAGE_SIZE, "%d\n", pending);
}
/*
 * sysfs store: "<node_type> <event_count>" — record how many events the
 * ADSP has queued and wake the report thread to consume them.
 *
 * Returns @count on success, -EINVAL on malformed input.
 */
static ssize_t adsp_notify_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct sensor_fb_cxt *sensor_fb_cxt = g_sensor_fb_cxt;
	uint16_t adsp_event_counts = 0;
	uint16_t node_type = 0;
	int err = 0;

	err = sscanf(buf, "%hu %hu", &node_type, &adsp_event_counts);
	/* sscanf() returns the number of successful conversions; the old
	 * "err < 0" check let partial/garbage input through with both
	 * values silently left at 0 */
	if (err != 2) {
		pr_err("adsp_notify_store error: err = %d\n", err);
		return -EINVAL;
	}

	spin_lock(&sensor_fb_cxt->rw_lock);
	sensor_fb_cxt->adsp_event_counts = adsp_event_counts;
	sensor_fb_cxt->node_type = node_type;
	spin_unlock(&sensor_fb_cxt->rw_lock);

	pr_info("adsp_value = %d, node_type=%d\n", adsp_event_counts,
		node_type);

	set_bit(THREAD_WAKEUP, (unsigned long *)&sensor_fb_cxt->wakeup_flag);
	/*wake_up_interruptible(&sensor_fb_cxt->wq);*/
	wake_up(&sensor_fb_cxt->wq);
	return count;
}
/*
 * Kernel-side entry point: queue @adsp_event_counts events of @node_type
 * and kick the report thread.  Always returns 0.
 */
int scp_notify_store(uint16_t node_type, uint16_t adsp_event_counts)
{
	struct sensor_fb_cxt *fb_cxt = g_sensor_fb_cxt;

	if (fb_cxt == NULL) {
		pr_err("error: g_sensor_fb_cxt is NULL\n");
		return 0;
	}

	spin_lock(&fb_cxt->rw_lock);
	fb_cxt->adsp_event_counts = adsp_event_counts;
	fb_cxt->node_type = node_type;
	spin_unlock(&fb_cxt->rw_lock);

	pr_info("adsp_value = %d, node_type=%d\n", adsp_event_counts,
		node_type);

	set_bit(THREAD_WAKEUP, (unsigned long *)&fb_cxt->wakeup_flag);
	/*wake_up_interruptible(&fb_cxt->wq);*/
	wake_up(&fb_cxt->wq);
	return 0;
}
/*
 * sysfs store: "<event_id> <count> [detail]" — forward a HAL-originated
 * feedback event straight to the kevent feedback channel.
 *
 * Always returns @count (errors are logged and swallowed, matching the
 * original contract).
 */
static ssize_t hal_info_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	uint16_t event_ct = 0;
	uint16_t event_id = 0;
	char strbuf[32] = {0x00};
	int err = 0;
	int index = 0;
	unsigned char payload[1024] = {0x00};

	pr_info("hal_info_store\n");
	/* BUGFIX: the old "%u" conversions stored a full unsigned int
	 * through the uint16_t pointers event_id/event_ct, overrunning
	 * them on the stack; "%hu" matches the 16-bit destinations.
	 * (The redundant memsets were dropped — both buffers are already
	 * zero-initialized at declaration.) */
	err = sscanf(buf, "%hu %hu %31s", &event_id, &event_ct, strbuf);
	if (err < 2) {
		/* need at least the event id and the count; detail optional */
		pr_err("hal_info_store error: err = %d\n", err);
		return count;
	}
	strbuf[31] = '\0';

	index = find_event_id(event_id);
	if (index == -1) {
		pr_info("nout find event_id =%d\n", event_id);
		return count;
	}

	scnprintf(payload, sizeof(payload),
		"NULL$$EventField@@%s$$FieldData@@%d$$detailData@@%s",
		g_fb_conf[index].fb_field,
		event_ct,
		strbuf);
	pr_info("payload =%s\n", payload);
#ifdef CONFIG_OPLUS_FEATURE_FEEDBACK
	oplus_kevent_fb(FB_SENSOR, g_fb_conf[index].fb_event_id, payload);
#endif
	return count;
}
/*
 * sysfs store (debug): "<node_type> <count> <event_id> <event_data>" —
 * inject a single fake feedback event into slot 0 of fb_smem and wake
 * the report thread.
 *
 * Always returns @count (parse errors are logged and swallowed).
 */
static ssize_t test_id_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct sensor_fb_cxt *sensor_fb_cxt = g_sensor_fb_cxt;
	uint16_t adsp_event_counts = 0;
	uint16_t node_type = 0;
	uint16_t event_id = 0;
	uint16_t event_data = 0;
	int err = 0;

	err = sscanf(buf, "%hu %hu %hu %hu", &node_type, &adsp_event_counts,
		&event_id, &event_data);
	/* require all four fields; the old "err < 0" check accepted
	 * partial parses with the remaining values left at 0 */
	if (err != 4) {
		pr_err("test_id_store error: err = %d\n", err);
		return count;
	}

	spin_lock(&sensor_fb_cxt->rw_lock);
	sensor_fb_cxt->adsp_event_counts = adsp_event_counts;
	sensor_fb_cxt->node_type = node_type;
	/* BUGFIX: the injected event used to be written after the unlock,
	 * racing the report thread that zeroes fb_smem under this lock */
	sensor_fb_cxt->fb_smem.event[0].event_id = event_id;
	sensor_fb_cxt->fb_smem.event[0].count = event_data;
	spin_unlock(&sensor_fb_cxt->rw_lock);

	pr_info("test_id_store adsp_value = %d, node_type=%d \n", adsp_event_counts,
		node_type);
	pr_info("test_id_store event_id = %d, event_data=%d \n", event_id, event_data);

	set_bit(THREAD_WAKEUP, (unsigned long *)&sensor_fb_cxt->wakeup_flag);
	/*wake_up_interruptible(&sensor_fb_cxt->wq);*/
	wake_up(&sensor_fb_cxt->wq);
	return count;
}
static ssize_t sensor_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_fb_cxt *sensor_fb_cxt = g_sensor_fb_cxt;
uint16_t sensor_list[2] = {0x00};
spin_lock(&sensor_fb_cxt->rw_lock);
sensor_list[0] = sensor_fb_cxt->sensor_list[0];
sensor_list[1] = sensor_fb_cxt->sensor_list[1];
spin_unlock(&sensor_fb_cxt->rw_lock);
pr_info("phy = 0x%x, virt = 0x%x\n", sensor_list[0], sensor_list[1]);
return snprintf(buf, PAGE_SIZE, "phy = 0x%x, virt = 0x%x\n", sensor_list[0],
sensor_list[1]);
}
/* sysfs attributes published under the platform device:
 *   adsp_notify — RW: pending-event-count handshake with the ADSP
 *   hal_info    — WO: direct feedback injection from the sensor HAL
 *   test_id     — WO: debug injection of a single fake event
 *   sensor_list — RO: available physical/virtual sensor bitmasks
 */
DEVICE_ATTR(adsp_notify, 0644, adsp_notify_show, adsp_notify_store);
DEVICE_ATTR(hal_info, 0644, NULL, hal_info_store);
DEVICE_ATTR(test_id, 0644, NULL, test_id_store);
DEVICE_ATTR(sensor_list, 0644, sensor_list_show, NULL);
static struct attribute *sensor_feedback_attributes[] = {
	&dev_attr_adsp_notify.attr,
	&dev_attr_hal_info.attr,
	&dev_attr_test_id.attr,
	&dev_attr_sensor_list.attr,
	NULL
};
static struct attribute_group sensor_feedback_attribute_group = {
	.attrs = sensor_feedback_attributes
};
#define SMEM_SENSOR_FEEDBACK (128)
/*
 * Copy the SCP/ADSP feedback event buffer out of the reserved shared
 * memory region into the driver context.
 *
 * Returns 0 on success, -2 when the region is not mapped.
 */
static int read_data_from_share_mem(struct sensor_fb_cxt *sensor_fb_cxt)
{
	struct fb_event_smem *shared;

	shared = (struct fb_event_smem *)(void *)(long)
			scp_get_reserve_mem_virt(SENS_FB_MEM_ID);
	if (shared == NULL)
		return -2;

	memcpy((void *)&sensor_fb_cxt->fb_smem, (void *)shared,
		sizeof(sensor_fb_cxt->fb_smem));
	return 0;
}
/*
 * Handle event ids that update driver state instead of being reported.
 * Currently only ALAILABLE_SENSOR_LIST_ID [sic]: its two payload words
 * are cached as the available-sensor bitmasks read back via sysfs/procfs.
 *
 * Returns 1 when the event was consumed here, 0 otherwise.
 */
int procce_special_event_id(unsigned short event_id, int count,
	struct sensor_fb_cxt *sensor_fb_cxt)
{
	int ret = 0;
	if (event_id == ALAILABLE_SENSOR_LIST_ID) {
		sensor_fb_cxt->sensor_list[0] = (uint32_t)
			sensor_fb_cxt->fb_smem.event[count].buff[0];
		sensor_fb_cxt->sensor_list[1] = (uint32_t)
			sensor_fb_cxt->fb_smem.event[count].buff[1];
		/* NOTE(review): this log labels sensor_list[0] "virt" while
		 * sensor_list_show()/sensor_list_read_proc() label index 0
		 * "phy" — one of the two is wrong; confirm which index
		 * holds which mask before trusting either log. */
		pr_info("sensor_list virt_sns = 0x%x, phy_sns = 0x%x\n",
			sensor_fb_cxt->sensor_list[0], sensor_fb_cxt->sensor_list[1]);
		ret = 1;
	}
	return ret;
}
static int parse_shr_info(struct sensor_fb_cxt *sensor_fb_cxt)
{
int ret = 0;
int count = 0;
uint16_t event_id = 0;
int index = 0;
unsigned char payload[1024] = {0x00};
int fb_len = 0;
unsigned char detail_buff[128] = {0x00};
for (count = 0; count < sensor_fb_cxt->adsp_event_counts; count ++) {
event_id = sensor_fb_cxt->fb_smem.event[count].event_id;
pr_info("event_id =%d, count =%d\n", event_id, count);
index = find_event_id(event_id);
if (index == -1) {
pr_info("not find event_id =%d, count =%d\n", event_id, count);
continue;
}
ret = procce_special_event_id(event_id, count, sensor_fb_cxt);
if (ret == 1) {
continue;
}
memset(payload, 0, sizeof(payload));
memset(detail_buff, 0, sizeof(detail_buff));
snprintf(detail_buff, sizeof(detail_buff), "%d %d %d",
sensor_fb_cxt->fb_smem.event[count].buff[0],
sensor_fb_cxt->fb_smem.event[count].buff[1],
sensor_fb_cxt->fb_smem.event[count].buff[2]);
fb_len += scnprintf(payload, sizeof(payload),
"NULL$$EventField@@%s$$FieldData@@%d$$detailData@@%s",
g_fb_conf[index].fb_field,
sensor_fb_cxt->fb_smem.event[count].count,
detail_buff);
pr_info("payload =%s\n", payload);
#ifdef CONFIG_OPLUS_FEATURE_FEEDBACK
oplus_kevent_fb(FB_SENSOR, g_fb_conf[index].fb_event_id, payload);
#endif
}
return ret;
}
/*
 * Report worker: sleeps until a producer sets THREAD_WAKEUP, then pulls
 * the pending events (from shared memory for node_type 0, straight from
 * fb_smem for the debug node types) and reports them, clearing the
 * buffer afterwards.
 */
static int sensor_report_thread(void *arg)
{
	int ret = 0;
	struct sensor_fb_cxt *sensor_fb_cxt = (struct sensor_fb_cxt *)arg;
	uint16_t node_type = 0;

	pr_info("sensor_report_thread step1!\n");
	while (!kthread_should_stop()) {
		/* BUGFIX: also wake on kthread_stop(); the old condition
		 * ignored it, so the thread re-slept and module removal
		 * could hang forever in kthread_stop() */
		wait_event_interruptible(sensor_fb_cxt->wq,
			test_bit(THREAD_WAKEUP,
				(unsigned long *)&sensor_fb_cxt->wakeup_flag) ||
			kthread_should_stop());
		if (kthread_should_stop())
			break;
		clear_bit(THREAD_WAKEUP, (unsigned long *)&sensor_fb_cxt->wakeup_flag);
		set_bit(THREAD_SLEEP, (unsigned long *)&sensor_fb_cxt->wakeup_flag);

		spin_lock(&sensor_fb_cxt->rw_lock);
		node_type = sensor_fb_cxt->node_type;
		spin_unlock(&sensor_fb_cxt->rw_lock);

		if (node_type == 0) {
			ret = read_data_from_share_mem(sensor_fb_cxt);
		} else if (node_type == 2) {
			/* event already injected into fb_smem by a sysfs node */
		} else if (node_type == 3) { /* power done */
		} else {
			pr_info("test from node\n");
		}
		ret = parse_shr_info(sensor_fb_cxt);

		spin_lock(&sensor_fb_cxt->rw_lock);
		memset((void *)&sensor_fb_cxt->fb_smem, 0, sizeof(struct fb_event_smem));
		sensor_fb_cxt->adsp_event_counts = 0;
		spin_unlock(&sensor_fb_cxt->rw_lock);
	}
	/* BUGFIX: was pr_info("step2 ret =%s\n", ret) — "%s" with an int
	 * argument is undefined behaviour and can oops */
	pr_info("step2 ret =%d\n", ret);
	return ret;
}
/* /proc/sns_debug/sensor_list reader: formats the cached phy/virt
 * sensor bitmasks into the caller's buffer. */
static ssize_t sensor_list_read_proc(struct file *file, char __user *buf,
	size_t count, loff_t *off)
{
	struct sensor_fb_cxt *fb_cxt =
		(struct sensor_fb_cxt *)PDE_DATA(file_inode(file));
	char page[128] = {0};
	int len = 0;

	len = snprintf(page, sizeof(page), "phy = 0x%x, virt = 0x%x\n",
		fb_cxt->sensor_list[0], fb_cxt->sensor_list[1]);
	len = simple_read_from_buffer(buf, count, off, page, strlen(page));
	pr_info("phy = 0x%x, virt = 0x%x, len=%d \n", fb_cxt->sensor_list[0],
		fb_cxt->sensor_list[1],
		len);
	return len;
}
static struct file_operations sensor_list_fops = {
.owner = THIS_MODULE,
.read = sensor_list_read_proc,
};
/*
 * Create the sysfs attribute group and the /proc/sns_debug/sensor_list
 * entry.  On failure everything that was actually created is torn down.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int create_sensor_node(struct sensor_fb_cxt *sensor_fb_cxt)
{
	int err = 0;
	struct proc_dir_entry *pentry = NULL;

	err = sysfs_create_group(&sensor_fb_cxt->sensor_fb_dev->dev.kobj,
			&sensor_feedback_attribute_group);
	if (err < 0) {
		pr_err("unable to create sensor_feedback_attribute_group file err=%d\n", err);
		/* BUGFIX: nothing was created yet; the old code jumped to a
		 * label that removed the never-added group */
		return err;
	}
	kobject_uevent(&sensor_fb_cxt->sensor_fb_dev->dev.kobj, KOBJ_ADD);

	sensor_fb_cxt->proc_sns = proc_mkdir("sns_debug", NULL);
	if (!sensor_fb_cxt->proc_sns) {
		pr_err("can't create sns_debug proc\n");
		err = -EFAULT;
		goto remove_group;
	}

	pentry = proc_create_data("sensor_list", 0666, sensor_fb_cxt->proc_sns,
			&sensor_list_fops, sensor_fb_cxt);
	if (!pentry) {
		pr_err("create sensor_list proc failed.\n");
		err = -EFAULT;
		goto remove_proc;
	}
	return 0;

remove_proc:
	/* BUGFIX: the old code leaked the sns_debug directory here */
	proc_remove(sensor_fb_cxt->proc_sns);
	sensor_fb_cxt->proc_sns = NULL;
remove_group:
	sysfs_remove_group(&sensor_fb_cxt->sensor_fb_dev->dev.kobj,
			&sensor_feedback_attribute_group);
	return err;
}
static int sensor_feedback_probe(struct platform_device *pdev)
{
int err = 0;
struct sensor_fb_cxt *sensor_fb_cxt = NULL;
sensor_fb_cxt = kzalloc(sizeof(struct sensor_fb_cxt), GFP_KERNEL);
if (sensor_fb_cxt == NULL) {
pr_err("kzalloc g_sensor_fb_cxt failed\n");
err = -ENOMEM;
goto alloc_sensor_fb_failed;
}
/*sensor_fb_cxt init*/
sensor_fb_cxt->sensor_fb_dev = pdev;
g_sensor_fb_cxt = sensor_fb_cxt;
spin_lock_init(&sensor_fb_cxt->rw_lock);
init_waitqueue_head(&sensor_fb_cxt->wq);
set_bit(THREAD_SLEEP, (unsigned long *)&sensor_fb_cxt->wakeup_flag);
platform_set_drvdata(pdev, sensor_fb_cxt);
err = create_sensor_node(sensor_fb_cxt);
if (err != 0) {
pr_info("create_sensor_node failed\n");
goto create_sensor_node_failed;
}
/*create sensor_feedback_task thread*/
sensor_fb_cxt->report_task = kthread_create(sensor_report_thread,
(void *)sensor_fb_cxt,
"sensor_feedback_task");
if (IS_ERR(sensor_fb_cxt->report_task)) {
pr_info("kthread_create failed\n");
err = PTR_ERR(sensor_fb_cxt->report_task);
goto create_task_failed;
}
/*wake up thread of report_task*/
wake_up_process(sensor_fb_cxt->report_task);
pr_info("sensor_feedback_init success\n");
return 0;
create_task_failed:
create_sensor_node_failed:
kfree(sensor_fb_cxt);
g_sensor_fb_cxt = NULL;
alloc_sensor_fb_failed:
return err;
}
static int sensor_feedback_remove(struct platform_device *pdev)
{
struct sensor_fb_cxt *sensor_fb_cxt = g_sensor_fb_cxt;
sysfs_remove_group(&sensor_fb_cxt->sensor_fb_dev->dev.kobj,
&sensor_feedback_attribute_group);
kfree(sensor_fb_cxt);
g_sensor_fb_cxt = NULL;
return 0;
}
/* Device-tree match table: binds to the "oplus,sensor-feedback" node. */
static const struct of_device_id of_drv_match[] = {
	{ .compatible = "oplus,sensor-feedback"},
	{},
};
MODULE_DEVICE_TABLE(of, of_drv_match);
/* Platform driver glue for the feedback device. */
static struct platform_driver _driver = {
	.probe = sensor_feedback_probe,
	.remove = sensor_feedback_remove,
	.driver = {
		.name = "sensor_feedback",
		.of_match_table = of_drv_match,
	},
};
/*
 * Module init: register the platform driver.  Propagate the registration
 * error instead of swallowing it as the old code did.
 */
static int __init sensor_feedback_init(void)
{
	pr_info("sensor_feedback_init call\n");
	return platform_driver_register(&_driver);
}
/* NOTE(review): exit path is disabled (see commented-out code below), so
 * the driver cannot be built/unloaded as a module — presumably built-in
 * only; confirm against Kconfig. */
/*
static int __exit sensor_feedback_exit(void)
{
pr_info("sensor_feedback_exit call\n");
platform_driver_unregister(&_driver);
return 0;
}*/
/* core_initcall so the feedback node is up before sensor clients probe —
 * TODO confirm the ordering requirement */
core_initcall(sensor_feedback_init);
//module_init(sensor_feedback_init);
//module_exit(sensor_feedback_exit);
MODULE_AUTHOR("JangHua.Tang");
MODULE_LICENSE("GPL v2");

View File

@@ -1,159 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2020 Oplus. All rights reserved.
 *
 * Shared declarations for the oplus sensor-feedback driver: event id
 * namespace, the shared-memory event layout and the driver context.
 */
#ifndef __SENSOR_FEEDBACK_H__
#define __SENSOR_FEEDBACK_H__
#include <linux/miscdevice.h>
#ifdef CONFIG_ARM
#include <linux/sched.h>
#else
#include <linux/wait.h>
#endif
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/param.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
/* bit positions in sensor_fb_cxt.wakeup_flag used for the report-thread
 * handshake */
#define THREAD_WAKEUP 0
#define THREAD_SLEEP 1
#undef SUBSYS_COUNTS
#define SUBSYS_COUNTS (3)
/* One row of the g_fb_conf table: maps a numeric feedback event id to
 * the kevent field name and event-id strings used in payloads. */
struct sensor_fb_conf {
	uint16_t event_id;    /* numeric id, see enum sensor_fb_event_id */
	char *fb_field;       /* EventField string for the payload */
	char *fb_event_id;    /* id string passed to oplus_kevent_fb() */
};
enum sensor_fb_event_id {
FD_HEAD_EVENT_ID = 0,
//1~100
PS_INIT_FAIL_ID = 1,
PS_I2C_ERR_ID = 2,
PS_ALLOC_FAIL_ID = 3,
PS_ESD_REST_ID = 4,
PS_NO_INTERRUPT_ID = 5,
PS_FIRST_REPORT_DELAY_COUNT_ID = 6,
PS_ORIGIN_DATA_TO_ZERO_ID = 7,
PS_CALI_DATA_ID = 8,
//100~200
ALS_INIT_FAIL_ID = 100,
ALS_I2C_ERR_ID = 101,
ALS_ALLOC_FAIL_ID = 102,
ALS_ESD_REST_ID = 103,
ALS_NO_INTERRUPT_ID = 104,
ALS_FIRST_REPORT_DELAY_COUNT_ID = 105,
ALS_ORIGIN_DATA_TO_ZERO_ID = 106,
ALS_CALI_DATA_ID = 107,
//200~300
ACCEL_INIT_FAIL_ID = 200,
ACCEL_I2C_ERR_ID = 201,
ACCEL_ALLOC_FAIL_ID = 202,
ACCEL_ESD_REST_ID = 203,
ACCEL_NO_INTERRUPT_ID = 204,
ACCEL_FIRST_REPORT_DELAY_COUNT_ID = 205,
ACCEL_ORIGIN_DATA_TO_ZERO_ID = 206,
ACCEL_CALI_DATA_ID = 207,
//300~400
GYRO_INIT_FAIL_ID = 300,
GYRO_I2C_ERR_ID = 301,
GYRO_ALLOC_FAIL_ID = 302,
GYRO_ESD_REST_ID = 303,
GYRO_NO_INTERRUPT_ID = 304,
GYRO_FIRST_REPORT_DELAY_COUNT_ID = 305,
GYRO_ORIGIN_DATA_TO_ZERO_ID = 306,
GYRO_CALI_DATA_ID = 307,
//400~500
MAG_INIT_FAIL_ID = 400,
MAG_I2C_ERR_ID = 401,
MAG_ALLOC_FAIL_ID = 402,
MAG_ESD_REST_ID = 403,
MAG_NO_INTERRUPT_ID = 404,
MAG_FIRST_REPORT_DELAY_COUNT_ID = 405,
MAG_ORIGIN_DATA_TO_ZERO_ID = 406,
MAG_CALI_DATA_ID = 407,
//500~600
SAR_INIT_FAIL_ID = 500,
SAR_I2C_ERR_ID = 501,
SAR_ALLOC_FAIL_ID = 502,
SAR_ESD_REST_ID = 503,
SAR_NO_INTERRUPT_ID = 504,
SAR_FIRST_REPORT_DELAY_COUNT_ID = 505,
SAR_ORIGIN_DATA_TO_ZERO_ID = 506,
SAR_CALI_DATA_ID = 507,
//600~700
POWER_SENSOR_INFO_ID = 600,
POWER_ACCEL_INFO_ID = 601,
POWER_GYRO_INFO_ID = 602,
POWER_MAG_INFO_ID = 603,
POWER_PROXIMITY_INFO_ID = 604,
POWER_LIGHT_INFO_ID = 605,
POWER_WISE_LIGHT_INFO_ID = 606,
POWER_WAKE_UP_RATE_ID = 607,
POWER_ADSP_SLEEP_RATIO_ID = 608,
//700~800
DOUBLE_TAP_REPORTED_ID = 701,
DOUBLE_TAP_PREVENTED_BY_NEAR_ID = 702,
DOUBLE_TAP_PREVENTED_BY_ATTITUDE_ID = 703,
DOUBLE_TAP_PREVENTED_BY_FREEFALL_Z_ID = 704,
DOUBLE_TAP_PREVENTED_BY_FREEFALL_SLOPE_ID = 705,
//1000
ALAILABLE_SENSOR_LIST_ID = 1000,
// 10000 , sensor-hal
HAL_SENSOR_NOT_FOUND = 10000,
HAL_QMI_ERROR = 10001,
HAL_SENSOR_TIMESTAMP_ERROR = 10002,
};
struct fd_data {
int data_x;
int data_y;
int data_z;
};
#define EVNET_DATA_LEN 3
struct sns_fb_event {
unsigned short event_id;
unsigned int count;
union {
int buff[EVNET_DATA_LEN];
struct fd_data data;
};
};
#define EVNET_NUM_MAX 109
struct fb_event_smem {
struct sns_fb_event event[EVNET_NUM_MAX];
};
/* Driver context: one global instance allocated at probe time. */
struct sensor_fb_cxt {
	/*struct miscdevice sensor_fb_dev;*/
	struct platform_device *sensor_fb_dev;  /* owning platform device */
	spinlock_t rw_lock;            /* guards the fields below */
	wait_queue_head_t wq;          /* report thread sleeps here */
	struct task_struct *report_task; /*kernel thread*/
	uint16_t adsp_event_counts;    /* pending events in fb_smem */
	struct fb_event_smem fb_smem;  /* local copy of the shared buffer */
	uint16_t node_type;            /* source of the last wakeup (0=smem) */
	unsigned long wakeup_flag;     /* THREAD_WAKEUP/THREAD_SLEEP bits */
	uint32_t sensor_list[2];       /* cached sensor bitmasks (indices:
	                                  see NOTE in procce_special_event_id) */
	struct proc_dir_entry *proc_sns; /* /proc/sns_debug directory */
};
#endif /*__SENSOR_FEEDBACK_H__*/

View File

@@ -15,4 +15,3 @@ subdir-ccflags-y += -I$(srctree)/drivers/misc/mediatek/scp/$(CONFIG_MTK_PLATFORM
subdir-ccflags-y += -D CONFIG_OPLUS_SENSOR_MTK68XX
endif
obj-$(CONFIG_NANOHUB) += sensor_devinfo.o
obj-$(CONFIG_OPLUS_SENSOR_FB_MTK) += oplus_sensor_feedback/

Some files were not shown because too many files have changed in this diff Show More