[ALPS04936086] BLOCKTAG: IO state extension
Its a legacy feature of IO trace in MTK platform. Change: - MQ, CMDQD changed to worker(HW dispatch queue) ToDo: - decouple with debugfs - distinguish emmc and SD - vmstat support (mdlog had been removed) MTK-Commit-Id: 42b0bd45cc6e7e096959be88fce8ed38d13854c3 Change-Id: Ie78cad95a0884d0f6eda2045f27f647e070705ae Signed-off-by: mtk81325 <peng.zhou@mediatek.com> CR-Id: ALPS04936086 Feature: [Android Default] F2FS File System
This commit is contained in:
@@ -2,7 +2,7 @@
|
||||
#
|
||||
# Makefile for the kernel block layer
|
||||
#
|
||||
|
||||
ccflags-y += -I$(srctree)/drivers/mmc/core
|
||||
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
|
||||
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
|
||||
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
|
||||
|
||||
@@ -37,6 +37,7 @@
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/psi.h>
|
||||
#include <linux/blk-crypto.h>
|
||||
#include <mt-plat/mtk_blocktag.h> /* MTK PATCH */
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/block.h>
|
||||
@@ -45,6 +46,7 @@
|
||||
#include "blk-mq.h"
|
||||
#include "blk-mq-sched.h"
|
||||
#include "blk-rq-qos.h"
|
||||
#include "mtk_mmc_block.h"
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct dentry *blk_debugfs_root;
|
||||
@@ -413,7 +415,7 @@ void blk_sync_queue(struct request_queue *q)
|
||||
if (q->mq_ops) {
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
mt_bio_queue_free(current);
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
cancel_delayed_work_sync(&hctx->run_work);
|
||||
} else {
|
||||
@@ -2571,6 +2573,9 @@ blk_qc_t submit_bio(struct bio *bio)
|
||||
count_vm_events(PGPGIN, count);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MTK_BLOCK_TAG
|
||||
mtk_btag_pidlog_submit_bio(bio);
|
||||
#endif
|
||||
if (unlikely(block_dump)) {
|
||||
char b[BDEVNAME_SIZE];
|
||||
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <trace/events/block.h>
|
||||
#include <mt-plat/mtk_blocktag.h> /* MTK PATCH */
|
||||
|
||||
#include "blk.h"
|
||||
|
||||
@@ -417,9 +418,13 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
|
||||
int cluster = blk_queue_cluster(q), nsegs = 0;
|
||||
|
||||
for_each_bio(bio)
|
||||
bio_for_each_segment(bvec, bio, iter)
|
||||
bio_for_each_segment(bvec, bio, iter) {
|
||||
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
|
||||
&nsegs, &cluster);
|
||||
#ifdef CONFIG_MTK_BLOCK_TAG
|
||||
mtk_btag_pidlog_map_sg(q, bio, &bvec);
|
||||
#endif
|
||||
}
|
||||
|
||||
return nsegs;
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@
|
||||
#include "blk-stat.h"
|
||||
#include "blk-mq-sched.h"
|
||||
#include "blk-rq-qos.h"
|
||||
#include "mtk_mmc_block.h"
|
||||
|
||||
static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
|
||||
static void blk_mq_poll_stats_start(struct request_queue *q);
|
||||
@@ -1463,6 +1464,7 @@ EXPORT_SYMBOL(blk_mq_queue_stopped);
|
||||
*/
|
||||
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
mt_bio_queue_free(current);
|
||||
cancel_delayed_work(&hctx->run_work);
|
||||
|
||||
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
|
||||
@@ -2186,6 +2188,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
||||
if (node == NUMA_NO_NODE)
|
||||
node = hctx->numa_node = set->numa_node;
|
||||
|
||||
mt_bio_queue_alloc(current, q);
|
||||
INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
|
||||
spin_lock_init(&hctx->lock);
|
||||
INIT_LIST_HEAD(&hctx->dispatch);
|
||||
|
||||
@@ -16,7 +16,7 @@ config MTK_ENG_BUILD
|
||||
If unsure, say N here.
|
||||
|
||||
menu "Storage"
|
||||
|
||||
source "drivers/misc/mediatek/blocktag/Kconfig"
|
||||
endmenu # Storage
|
||||
|
||||
menu "Power, PMIC, Battery & Low Power"
|
||||
|
||||
@@ -79,3 +79,4 @@ obj-$(CONFIG_RT_FLASHLIGHT) += flashlight/richtek/
|
||||
obj-$(CONFIG_USB_MTK_HDRC) += usb20/
|
||||
obj-$(CONFIG_MTK_USB_TYPEC) += typec/
|
||||
obj-$(CONFIG_MTK_CCU) += ccu/src/
|
||||
obj-$(CONFIG_MTK_BLOCK_TAG) += blocktag/
|
||||
|
||||
19
drivers/misc/mediatek/blocktag/Kconfig
Normal file
19
drivers/misc/mediatek/blocktag/Kconfig
Normal file
@@ -0,0 +1,19 @@
|
||||
#
|
||||
# block tag trace
|
||||
#
|
||||
|
||||
comment "Storage Block Tag"
|
||||
|
||||
config MTK_BLOCK_TAG
|
||||
bool "Storage Block Tag"
|
||||
depends on BLOCK
|
||||
depends on (MTK_GMO_RAM_OPTIMIZE && MTK_ENG_BUILD) || \
|
||||
!MTK_GMO_RAM_OPTIMIZE
|
||||
depends on DEBUG_FS
|
||||
help
|
||||
Enable block tagging at block driver, tag requester pid to
|
||||
the accessing pages. This allows MMC/UFS Block IO log to obtian
|
||||
IO statistics of each process. The Block Tag also provides
|
||||
utility functions to MMC/UFS Block IO log, such as throughput
|
||||
calculation, log printing, and ring trace handling.
|
||||
|
||||
6
drivers/misc/mediatek/blocktag/Makefile
Normal file
6
drivers/misc/mediatek/blocktag/Makefile
Normal file
@@ -0,0 +1,6 @@
|
||||
#
|
||||
# Makefile for block tag drivers
|
||||
#
|
||||
|
||||
obj-$(CONFIG_MTK_BLOCK_TAG) += blocktag.o
|
||||
|
||||
1364
drivers/misc/mediatek/blocktag/blocktag.c
Normal file
1364
drivers/misc/mediatek/blocktag/blocktag.c
Normal file
File diff suppressed because it is too large
Load Diff
262
drivers/misc/mediatek/include/mt-plat/mtk_blocktag.h
Normal file
262
drivers/misc/mediatek/include/mt-plat/mtk_blocktag.h
Normal file
@@ -0,0 +1,262 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2019 MediaTek Inc.
|
||||
*/
|
||||
|
||||
#ifndef _MTK_BLOCKTAG_H
|
||||
#define _MTK_BLOCKTAG_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#if defined(CONFIG_MTK_BLOCK_TAG)
|
||||
|
||||
/*
|
||||
* MTK_BTAG_FEATURE_MICTX_IOSTAT
|
||||
*
|
||||
* Shall be defined if we can provide iostat
|
||||
* produced by mini context.
|
||||
*
|
||||
* This feature is used to extend kernel
|
||||
* trace events to have more I/O information.
|
||||
*/
|
||||
#define MTK_BTAG_FEATURE_MICTX_IOSTAT
|
||||
|
||||
#define BLOCKTAG_PIDLOG_ENTRIES 50
|
||||
#define BLOCKTAG_NAME_LEN 16
|
||||
#define BLOCKTAG_PRINT_LEN 4096
|
||||
|
||||
#define BTAG_RT(btag) (btag ? &btag->rt : NULL)
|
||||
#define BTAG_CTX(btag) (btag ? btag->ctx.priv : NULL)
|
||||
#define BTAG_KLOGEN(btag) (btag ? btag->klog_enable : 0)
|
||||
|
||||
struct page_pid_logger {
|
||||
unsigned short pid;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MTK_USE_RESERVED_EXT_MEM
|
||||
extern void *extmem_malloc_page_align(size_t bytes);
|
||||
#endif
|
||||
|
||||
enum {
|
||||
PIDLOG_MODE_BLK_SUBMIT_BIO = 0,
|
||||
PIDLOG_MODE_MM_FS
|
||||
};
|
||||
|
||||
enum mtk_btag_storage_type {
|
||||
BTAG_STORAGE_EMBEDDED = 0,
|
||||
BTAG_STORAGE_EXTERNAL
|
||||
};
|
||||
|
||||
struct mtk_btag_workload {
|
||||
__u64 period; /* period time (ns) */
|
||||
__u64 usage; /* busy time (ns) */
|
||||
__u32 percent; /* workload */
|
||||
__u32 count; /* access count */
|
||||
};
|
||||
|
||||
struct mtk_btag_throughput_rw {
|
||||
__u64 usage; /* busy time (ns) */
|
||||
__u32 size; /* transferred bytes */
|
||||
__u32 speed; /* KB/s */
|
||||
};
|
||||
|
||||
struct mtk_btag_throughput {
|
||||
struct mtk_btag_throughput_rw r; /* read */
|
||||
struct mtk_btag_throughput_rw w; /* write */
|
||||
};
|
||||
|
||||
struct mtk_btag_req_rw {
|
||||
__u16 count;
|
||||
__u32 size; /* bytes */
|
||||
};
|
||||
|
||||
struct mtk_btag_req {
|
||||
struct mtk_btag_req_rw r; /* read */
|
||||
struct mtk_btag_req_rw w; /* write */
|
||||
};
|
||||
|
||||
/*
|
||||
* public structure to provide IO statistics
|
||||
* in a period of time.
|
||||
*
|
||||
* Make sure MTK_BTAG_FEATURE_MICTX_IOSTAT is
|
||||
* defined alone with mictx series.
|
||||
*/
|
||||
struct mtk_btag_mictx_iostat_struct {
|
||||
__u64 duration; /* duration time for below performance data (ns) */
|
||||
__u32 tp_req_r; /* throughput (per-request): read (KB/s) */
|
||||
__u32 tp_req_w; /* throughput (per-request): write (KB/s) */
|
||||
__u32 tp_all_r; /* throughput (overlapped) : read (KB/s) */
|
||||
__u32 tp_all_w; /* throughput (overlapped) : write (KB/s) */
|
||||
__u32 reqsize_r; /* request size : read (Bytes) */
|
||||
__u32 reqsize_w; /* request size : write (Bytes) */
|
||||
__u32 reqcnt_r; /* request count: read */
|
||||
__u32 reqcnt_w; /* request count: write */
|
||||
__u16 wl; /* storage device workload (%) */
|
||||
__u16 q_depth; /* storage cmdq queue depth */
|
||||
};
|
||||
|
||||
/*
|
||||
* mini context for integration with
|
||||
* other performance analysis tools.
|
||||
*/
|
||||
struct mtk_btag_mictx_struct {
|
||||
struct mtk_btag_throughput tp;
|
||||
struct mtk_btag_req req;
|
||||
__u64 window_begin;
|
||||
__u64 tp_min_time;
|
||||
__u64 tp_max_time;
|
||||
__u64 idle_begin;
|
||||
__u64 idle_total;
|
||||
__u32 q_depth;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct mtk_btag_vmstat {
|
||||
__u64 file_pages;
|
||||
__u64 file_dirty;
|
||||
__u64 dirtied;
|
||||
__u64 writeback;
|
||||
__u64 written;
|
||||
__u64 fmflt;
|
||||
};
|
||||
|
||||
struct mtk_btag_pidlogger_entry_rw {
|
||||
__u16 count;
|
||||
__u32 length;
|
||||
};
|
||||
|
||||
struct mtk_btag_pidlogger_entry {
|
||||
__u16 pid;
|
||||
struct mtk_btag_pidlogger_entry_rw r; /* read */
|
||||
struct mtk_btag_pidlogger_entry_rw w; /* write */
|
||||
};
|
||||
|
||||
struct mtk_btag_pidlogger {
|
||||
__u16 current_pid;
|
||||
struct mtk_btag_pidlogger_entry info[BLOCKTAG_PIDLOG_ENTRIES];
|
||||
};
|
||||
|
||||
struct mtk_btag_cpu {
|
||||
__u64 user;
|
||||
__u64 nice;
|
||||
__u64 system;
|
||||
__u64 idle;
|
||||
__u64 iowait;
|
||||
__u64 irq;
|
||||
__u64 softirq;
|
||||
};
|
||||
|
||||
/* Trace: entry of the ring buffer */
|
||||
struct mtk_btag_trace {
|
||||
uint64_t time;
|
||||
pid_t pid;
|
||||
u32 qid;
|
||||
struct mtk_btag_workload workload;
|
||||
struct mtk_btag_throughput throughput;
|
||||
struct mtk_btag_vmstat vmstat;
|
||||
struct mtk_btag_pidlogger pidlog;
|
||||
struct mtk_btag_cpu cpu;
|
||||
};
|
||||
|
||||
/* Ring Trace */
|
||||
struct mtk_btag_ringtrace {
|
||||
struct mtk_btag_trace *trace;
|
||||
spinlock_t lock;
|
||||
int index;
|
||||
int max;
|
||||
};
|
||||
|
||||
typedef size_t (*mtk_btag_seq_f) (char **, unsigned long *, struct seq_file *);
|
||||
|
||||
/* BlockTag */
|
||||
struct mtk_blocktag {
|
||||
char name[BLOCKTAG_NAME_LEN];
|
||||
struct mtk_btag_ringtrace rt;
|
||||
|
||||
struct prbuf_t {
|
||||
spinlock_t lock;
|
||||
char buf[BLOCKTAG_PRINT_LEN];
|
||||
} prbuf;
|
||||
|
||||
/* lock order: ctx.priv->lock => prbuf.lock */
|
||||
struct context_t {
|
||||
int count;
|
||||
int size;
|
||||
void *priv;
|
||||
} ctx;
|
||||
|
||||
struct dentry_t {
|
||||
struct dentry *droot;
|
||||
struct dentry *dklog;
|
||||
struct dentry *dlog;
|
||||
struct dentry *dlog_mictx;
|
||||
struct dentry *dmem;
|
||||
} dentry;
|
||||
|
||||
mtk_btag_seq_f seq_show;
|
||||
|
||||
unsigned int klog_enable;
|
||||
unsigned int used_mem;
|
||||
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct mtk_blocktag *mtk_btag_alloc(const char *name,
|
||||
unsigned int ringtrace_count, size_t ctx_size, unsigned int ctx_count,
|
||||
mtk_btag_seq_f seq_show);
|
||||
void mtk_btag_free(struct mtk_blocktag *btag);
|
||||
|
||||
struct mtk_btag_trace *mtk_btag_curr_trace(struct mtk_btag_ringtrace *rt);
|
||||
struct mtk_btag_trace *mtk_btag_next_trace(struct mtk_btag_ringtrace *rt);
|
||||
|
||||
int mtk_btag_pidlog_add_mmc(struct request_queue *q, pid_t pid, __u32 len,
|
||||
int rw);
|
||||
int mtk_btag_pidlog_add_ufs(struct request_queue *q, pid_t pid, __u32 len,
|
||||
int rw);
|
||||
void mtk_btag_pidlog_insert(struct mtk_btag_pidlogger *pidlog, pid_t pid,
|
||||
__u32 len, int rw);
|
||||
|
||||
void mtk_btag_cpu_eval(struct mtk_btag_cpu *cpu);
|
||||
void mtk_btag_pidlog_eval(struct mtk_btag_pidlogger *pl,
|
||||
struct mtk_btag_pidlogger *ctx_pl);
|
||||
void mtk_btag_throughput_eval(struct mtk_btag_throughput *tp);
|
||||
void mtk_btag_vmstat_eval(struct mtk_btag_vmstat *vm);
|
||||
|
||||
void mtk_btag_task_timetag(char *buf, unsigned int len, unsigned int stage,
|
||||
unsigned int max, const char *name[], uint64_t *t, __u32 bytes);
|
||||
void mtk_btag_klog(struct mtk_blocktag *btag, struct mtk_btag_trace *tr);
|
||||
|
||||
void mtk_btag_pidlog_map_sg(struct request_queue *q, struct bio *bio,
|
||||
struct bio_vec *bvec);
|
||||
void mtk_btag_pidlog_copy_pid(struct page *src, struct page *dst);
|
||||
void mtk_btag_pidlog_submit_bio(struct bio *bio);
|
||||
void mtk_btag_pidlog_set_pid(struct page *p);
|
||||
|
||||
void mtk_btag_mictx_enable(int enable);
|
||||
void mtk_btag_mictx_eval_tp(
|
||||
unsigned int rw, __u64 usage, __u32 size);
|
||||
void mtk_btag_mictx_eval_req(
|
||||
unsigned int rw, __u32 cnt, __u32 size);
|
||||
int mtk_btag_mictx_get_data(
|
||||
struct mtk_btag_mictx_iostat_struct *iostat);
|
||||
void mtk_btag_mictx_update_ctx(__u32 q_depth);
|
||||
|
||||
#else
|
||||
|
||||
#define mtk_btag_pidlog_copy_pid(...)
|
||||
#define mtk_btag_pidlog_map_sg(...)
|
||||
#define mtk_btag_pidlog_submit_bio(...)
|
||||
#define mtk_btag_pidlog_set_pid(...)
|
||||
|
||||
#define mtk_btag_mictx_enable(...)
|
||||
#define mtk_btag_mictx_eval_tp(...)
|
||||
#define mtk_btag_mictx_eval_req(...)
|
||||
#define mtk_btag_mictx_get_data(...)
|
||||
#define mtk_btag_mictx_update_ctx(...)
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
@@ -89,3 +89,16 @@ config MMC_CRYPTO
|
||||
capabilities of the MMC device (if present) to perform crypto
|
||||
operations on data being transferred to/from the device.
|
||||
|
||||
config MMC_BLOCK_IO_LOG
|
||||
bool "Block IO log"
|
||||
depends on MMC_BLOCK
|
||||
depends on MTK_BLOCK_TAG
|
||||
default y
|
||||
help
|
||||
The BLOCK TAG trace provides I/O logs for performance analysis and
|
||||
application access pattern study. The BLOCK TAG trace summarizes the
|
||||
I/O of each process every 1 second, and saves to ring buffer. The log
|
||||
can be displayed by "cat /sys/kernel/debug/blockio".
|
||||
The trace is default enabled on user and eng builts, say N here to
|
||||
disable it.
|
||||
|
||||
|
||||
@@ -19,3 +19,4 @@ mmc_block-objs := block.o queue.o
|
||||
obj-$(CONFIG_MMC_TEST) += mmc_test.o
|
||||
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
|
||||
obj-$(CONFIG_MMC_CRYPTO) += mmc-crypto.o
|
||||
obj-$(CONFIG_MMC_BLOCK_IO_LOG) += mtk_mmc_block.o
|
||||
|
||||
@@ -47,6 +47,7 @@
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include "mtk_mmc_block.h"
|
||||
#include "queue.h"
|
||||
#include "block.h"
|
||||
#include "core.h"
|
||||
@@ -2230,12 +2231,14 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
|
||||
err = mmc_blk_rw_wait(mq, &prev_req);
|
||||
if (err)
|
||||
goto out_post_req;
|
||||
mt_biolog_mmcqd_req_end(mqrq->brq.mrq.data);
|
||||
|
||||
mq->rw_wait = true;
|
||||
err = mmc_start_request(host, &mqrq->brq.mrq);
|
||||
|
||||
if (prev_req)
|
||||
mmc_blk_mq_post_req(mq, prev_req);
|
||||
mt_biolog_mmcqd_req_start(host);
|
||||
|
||||
if (err)
|
||||
mq->rw_wait = false;
|
||||
@@ -2308,6 +2311,7 @@ static int mmc_blk_swcq_issue_rw_rq(struct mmc_queue *mq,
|
||||
else
|
||||
return -EBUSY;
|
||||
|
||||
mt_biolog_mmcqd_req_check();
|
||||
mq->mqrq[index].req = req;
|
||||
atomic_set(&mqrq->index, index + 1);
|
||||
atomic_set(&mq->mqrq[index].index, index + 1);
|
||||
@@ -3212,7 +3216,7 @@ static int __init mmc_blk_init(void)
|
||||
res = mmc_register_driver(&mmc_driver);
|
||||
if (res)
|
||||
goto out_blkdev_unreg;
|
||||
|
||||
mt_mmc_biolog_init();
|
||||
return 0;
|
||||
|
||||
out_blkdev_unreg:
|
||||
|
||||
@@ -51,6 +51,7 @@
|
||||
#include "mmc_ops.h"
|
||||
#include "sd_ops.h"
|
||||
#include "sdio_ops.h"
|
||||
#include "mtk_mmc_block.h"
|
||||
|
||||
/* The max erase timeout, used when host->max_busy_timeout isn't specified */
|
||||
#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
|
||||
@@ -897,9 +898,10 @@ int mmc_run_queue_thread(void *data)
|
||||
u64 chk_time = 0;
|
||||
|
||||
pr_info("[CQ] start cmdq thread\n");
|
||||
mt_bio_queue_alloc(current, NULL);
|
||||
|
||||
while (1) {
|
||||
|
||||
mt_biolog_cmdq_check();
|
||||
/* End request stage 1/2 */
|
||||
if (atomic_read(&host->cq_rw)
|
||||
|| (atomic_read(&host->areq_cnt) <= 1)) {
|
||||
@@ -942,7 +944,7 @@ int mmc_run_queue_thread(void *data)
|
||||
if (done_mrq && !done_mrq->data->error
|
||||
&& !done_mrq->cmd->error) {
|
||||
task_id = (done_mrq->cmd->arg >> 16) & 0x1f;
|
||||
//mt_biolog_cmdq_dma_end(task_id);
|
||||
mt_biolog_cmdq_dma_end(task_id);
|
||||
mmc_check_write(host, done_mrq);
|
||||
host->cur_rw_task = CQ_TASK_IDLE;
|
||||
is_done = true;
|
||||
@@ -982,7 +984,7 @@ int mmc_run_queue_thread(void *data)
|
||||
if (err == -EINVAL)
|
||||
WARN_ON(1);
|
||||
host->ops->request(host, dat_mrq);
|
||||
//mt_biolog_cmdq_dma_start(task_id);
|
||||
mt_biolog_cmdq_dma_start(task_id);
|
||||
atomic_dec(&host->cq_rdy_cnt);
|
||||
dat_mrq = NULL;
|
||||
}
|
||||
@@ -991,10 +993,10 @@ int mmc_run_queue_thread(void *data)
|
||||
/* End request stage 2/2 */
|
||||
if (is_done) {
|
||||
task_id = (done_mrq->cmd->arg >> 16) & 0x1f;
|
||||
//mt_biolog_cmdq_isdone_start(task_id,
|
||||
// host->areq_que[task_id]->mrq_que);
|
||||
//mt_biolog_cmdq_isdone_end(task_id);
|
||||
//mt_biolog_cmdq_check();
|
||||
mt_biolog_cmdq_isdone_start(task_id,
|
||||
host->areq_que[task_id]->mrq_que);
|
||||
mt_biolog_cmdq_isdone_end(task_id);
|
||||
mt_biolog_cmdq_check();
|
||||
mmc_blk_end_queued_req(host, done_mrq->areq, task_id);
|
||||
done_mrq = NULL;
|
||||
is_done = false;
|
||||
@@ -1009,7 +1011,7 @@ int mmc_run_queue_thread(void *data)
|
||||
|
||||
while (cmd_mrq) {
|
||||
task_id = ((cmd_mrq->sbc->arg >> 16) & 0x1f);
|
||||
//mt_biolog_cmdq_queue_task(task_id, cmd_mrq);
|
||||
mt_biolog_cmdq_queue_task(task_id, cmd_mrq);
|
||||
if (host->task_id_index & (1 << task_id)) {
|
||||
pr_info(
|
||||
"[%s] BUG!!! task_id %d used, task_id_index 0x%08lx, areq_cnt = %d, cq_wait_rdy = %d\n",
|
||||
@@ -1089,14 +1091,14 @@ int mmc_run_queue_thread(void *data)
|
||||
}
|
||||
|
||||
/* Sleep when nothing to do */
|
||||
//mt_biolog_cmdq_check();
|
||||
mt_biolog_cmdq_check();
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (atomic_read(&host->areq_cnt) == 0)
|
||||
schedule();
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
}
|
||||
//mt_bio_queue_free(current);
|
||||
mt_bio_queue_free(current);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
915
drivers/mmc/core/mtk_mmc_block.c
Normal file
915
drivers/mmc/core/mtk_mmc_block.c
Normal file
@@ -0,0 +1,915 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2019 MediaTek Inc.
|
||||
*/
|
||||
|
||||
#define DEBUG 1
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/spinlock_types.h>
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/blk_types.h>
|
||||
#include <linux/mmc/core.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/card.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#ifdef CONFIG_MTK_USE_RESERVED_EXT_MEM
|
||||
#include <linux/exm_driver.h>
|
||||
#endif
|
||||
|
||||
#include "mtk_mmc_block.h"
|
||||
#include <mt-plat/mtk_blocktag.h>
|
||||
|
||||
#define SECTOR_SHIFT 9
|
||||
|
||||
/* ring trace for debugfs */
|
||||
struct mtk_blocktag *mtk_btag_mmc;
|
||||
|
||||
/* context buffer of each request queue */
|
||||
struct mt_bio_context *mt_ctx_map[MMC_BIOLOG_CONTEXTS] = { 0 };
|
||||
|
||||
/* context index for mt_ctx_map */
|
||||
enum {
|
||||
CTX_MMCQD0 = 0,
|
||||
CTX_MMCQD1 = 1,
|
||||
CTX_MMCQD0_BOOT0 = 2,
|
||||
CTX_MMCQD0_BOOT1 = 3,
|
||||
CTX_MMCQD0_RPMB = 4,
|
||||
CTX_MMCCMDQD0 = 5,
|
||||
CTX_EXECQ = 9
|
||||
};
|
||||
|
||||
/* context state for command queue */
|
||||
enum {
|
||||
CMDQ_CTX_NOT_DMA = 0,
|
||||
CMDQ_CTX_IN_DMA = 1,
|
||||
CMDQ_CTX_QUEUE = 2
|
||||
};
|
||||
|
||||
/* context state for mmcqd */
|
||||
enum {
|
||||
MMCQD_NORMAL = 0,
|
||||
MMCQD_CMDQ_MODE_EN = 1
|
||||
};
|
||||
|
||||
#define MT_BIO_TRACE_LATENCY (unsigned long long)(1000000000)
|
||||
|
||||
#define REQ_EXECQ "exe_cq"
|
||||
#define REQ_MMCQD0 "kworker"
|
||||
#define REQ_MMCQD0_BOOT0 "mmcqd/0boot0"
|
||||
#define REQ_MMCQD0_BOOT1 "mmcqd/0boot1"
|
||||
#define REQ_MMCQD0_RPMB "mmcqd/0rpmb"
|
||||
#define REQ_MMCCMDQD0 "mmc-cmdqd/0"
|
||||
#define REQ_MMCQD1 "mmcqd/1"
|
||||
|
||||
static void mt_bio_ctx_count_usage(struct mt_bio_context *ctx,
|
||||
__u64 start, __u64 end);
|
||||
static uint64_t mt_bio_get_period_busy(struct mt_bio_context *ctx);
|
||||
|
||||
/* queue id:
|
||||
* 0=internal storage (emmc:mmcqd0/exe_cq),
|
||||
* 1=external storage (t-card:mmcqd1)
|
||||
*/
|
||||
static int get_qid_by_name(const char *str)
|
||||
{
|
||||
if (strncmp(str, REQ_EXECQ, strlen(REQ_EXECQ)) == 0)
|
||||
return BTAG_STORAGE_EMBEDDED;
|
||||
if (strncmp(str, REQ_MMCQD0, strlen(REQ_MMCQD0)) == 0)
|
||||
return BTAG_STORAGE_EMBEDDED; /* this includes boot0, boot1 */
|
||||
if (strncmp(str, REQ_MMCCMDQD0, strlen(REQ_MMCCMDQD0)) == 0)
|
||||
return BTAG_STORAGE_EMBEDDED;
|
||||
if (strncmp(str, REQ_MMCQD1, strlen(REQ_MMCQD1)) == 0)
|
||||
return BTAG_STORAGE_EXTERNAL;
|
||||
return 99;
|
||||
}
|
||||
|
||||
/* get context id to mt_ctx_map[] by name */
|
||||
static int get_ctxid_by_name(const char *str)
|
||||
{
|
||||
if (strncmp(str, REQ_EXECQ, strlen(REQ_EXECQ)) == 0)
|
||||
return CTX_EXECQ;
|
||||
if (strncmp(str, REQ_MMCQD0_RPMB, strlen(REQ_MMCQD0_RPMB)) == 0)
|
||||
return CTX_MMCQD0_RPMB;
|
||||
if (strncmp(str, REQ_MMCQD0_BOOT0, strlen(REQ_MMCQD0_BOOT0)) == 0)
|
||||
return CTX_MMCQD0_BOOT0;
|
||||
if (strncmp(str, REQ_MMCQD0_BOOT1, strlen(REQ_MMCQD0_BOOT1)) == 0)
|
||||
return CTX_MMCQD0_BOOT1;
|
||||
if (strncmp(str, REQ_MMCCMDQD0, strlen(REQ_MMCCMDQD0)) == 0)
|
||||
return CTX_MMCCMDQD0;
|
||||
if (strncmp(str, REQ_MMCQD0, strlen(REQ_MMCQD0)) == 0)
|
||||
return CTX_MMCQD0;
|
||||
if (strncmp(str, REQ_MMCQD1, strlen(REQ_MMCQD1)) == 0)
|
||||
return CTX_MMCQD1;
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void mt_bio_init_task(struct mt_bio_context_task *tsk)
|
||||
{
|
||||
int i;
|
||||
|
||||
tsk->task_id = -1;
|
||||
tsk->arg = 0;
|
||||
for (i = 0; i < tsk_max; i++)
|
||||
tsk->t[i] = 0;
|
||||
}
|
||||
|
||||
static void mt_bio_init_ctx(struct mt_bio_context *ctx,
|
||||
struct task_struct *thread, struct request_queue *q)
|
||||
{
|
||||
int i;
|
||||
|
||||
ctx->q = q;
|
||||
ctx->pid = task_pid_nr(thread);
|
||||
get_task_comm(ctx->comm, thread);
|
||||
ctx->qid = get_qid_by_name(ctx->comm);
|
||||
spin_lock_init(&ctx->lock);
|
||||
ctx->id = get_ctxid_by_name(ctx->comm);
|
||||
if (ctx->id >= 0)
|
||||
mt_ctx_map[ctx->id] = ctx;
|
||||
ctx->period_start_t = sched_clock();
|
||||
|
||||
for (i = 0; i < MMC_BIOLOG_CONTEXT_TASKS; i++)
|
||||
mt_bio_init_task(&ctx->task[i]);
|
||||
}
|
||||
|
||||
void mt_bio_queue_alloc(struct task_struct *thread, struct request_queue *q)
|
||||
{
|
||||
int i;
|
||||
pid_t pid;
|
||||
struct mt_bio_context *ctx = BTAG_CTX(mtk_btag_mmc);
|
||||
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
pid = task_pid_nr(thread);
|
||||
|
||||
for (i = 0; i < MMC_BIOLOG_CONTEXTS; i++) {
|
||||
if (ctx[i].pid == pid)
|
||||
break;
|
||||
if (ctx[i].pid == 0) {
|
||||
mt_bio_init_ctx(&ctx[i], thread, q);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void mt_bio_queue_free(struct task_struct *thread)
|
||||
{
|
||||
int i;
|
||||
pid_t pid;
|
||||
struct mt_bio_context *ctx = BTAG_CTX(mtk_btag_mmc);
|
||||
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
pid = task_pid_nr(thread);
|
||||
|
||||
for (i = 0; i < MMC_BIOLOG_CONTEXTS; i++) {
|
||||
if (ctx[i].pid == pid) {
|
||||
mt_ctx_map[ctx[i].id] = NULL;
|
||||
memset(&ctx[i], 0, sizeof(struct mt_bio_context));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct mt_bio_context *mt_bio_curr_queue(struct request_queue *q)
|
||||
{
|
||||
int i;
|
||||
pid_t qd_pid;
|
||||
struct mt_bio_context *ctx = BTAG_CTX(mtk_btag_mmc);
|
||||
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
|
||||
qd_pid = task_pid_nr(current);
|
||||
|
||||
for (i = 0; i < MMC_BIOLOG_CONTEXTS; i++) {
|
||||
if (ctx[i].pid == 0)
|
||||
continue;
|
||||
if ((qd_pid == ctx[i].pid) || (ctx[i].q && ctx[i].q == q))
|
||||
return &ctx[i];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* get context correspond to current process */
|
||||
static inline struct mt_bio_context *mt_bio_curr_ctx(void)
|
||||
{
|
||||
return mt_bio_curr_queue(NULL);
|
||||
}
|
||||
|
||||
/* get other queue's context by context id */
|
||||
static struct mt_bio_context *mt_bio_get_ctx(int id)
|
||||
{
|
||||
if (id < 0 || id >= MMC_BIOLOG_CONTEXTS)
|
||||
return NULL;
|
||||
|
||||
return mt_ctx_map[id];
|
||||
}
|
||||
|
||||
/* append a pidlog to given context */
|
||||
int mtk_btag_pidlog_add_mmc(struct request_queue *q, pid_t pid, __u32 len,
|
||||
int write)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct mt_bio_context *ctx;
|
||||
|
||||
ctx = mt_bio_curr_queue(q);
|
||||
if (!ctx)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&ctx->lock, flags);
|
||||
mtk_btag_pidlog_insert(&ctx->pidlog, pid, len, write);
|
||||
|
||||
if (ctx->qid == BTAG_STORAGE_EMBEDDED)
|
||||
mtk_btag_mictx_eval_req(write, 1, len);
|
||||
|
||||
spin_unlock_irqrestore(&ctx->lock, flags);
|
||||
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mtk_btag_pidlog_add_mmc);
|
||||
|
||||
/* evaluate throughput and workload of given context */
|
||||
static void mt_bio_context_eval(struct mt_bio_context *ctx)
|
||||
{
|
||||
struct mt_bio_context_task *tsk;
|
||||
uint64_t min, period, tsk_start;
|
||||
int i;
|
||||
|
||||
min = ctx->period_end_t;
|
||||
|
||||
/* for all tasks if there is an on-going request */
|
||||
for (i = 0; i < MMC_BIOLOG_CONTEXT_TASKS; i++) {
|
||||
tsk = &ctx->task[i];
|
||||
if (tsk->task_id >= 0) {
|
||||
tsk_start = tsk->t[tsk_req_start];
|
||||
if (tsk_start &&
|
||||
tsk_start >= ctx->period_start_t &&
|
||||
tsk_start < min)
|
||||
min = tsk_start;
|
||||
}
|
||||
}
|
||||
|
||||
mt_bio_ctx_count_usage(ctx, min, ctx->period_end_t);
|
||||
ctx->workload.usage = mt_bio_get_period_busy(ctx);
|
||||
|
||||
if (ctx->workload.period > (ctx->workload.usage * 100)) {
|
||||
ctx->workload.percent = 1;
|
||||
} else {
|
||||
period = ctx->workload.period;
|
||||
do_div(period, 100);
|
||||
ctx->workload.percent =
|
||||
(__u32)ctx->workload.usage / (__u32)period;
|
||||
}
|
||||
|
||||
mtk_btag_throughput_eval(&ctx->throughput);
|
||||
}
|
||||
|
||||
/* print context to trace ring buffer */
|
||||
static void mt_bio_print_trace(struct mt_bio_context *ctx)
|
||||
{
|
||||
struct mtk_btag_ringtrace *rt = BTAG_RT(mtk_btag_mmc);
|
||||
struct mtk_btag_trace *tr;
|
||||
struct mt_bio_context *pid_ctx = ctx;
|
||||
unsigned long flags;
|
||||
|
||||
if (!rt)
|
||||
return;
|
||||
|
||||
if (ctx->id == CTX_EXECQ)
|
||||
pid_ctx = mt_bio_get_ctx(CTX_MMCQD0);
|
||||
|
||||
spin_lock_irqsave(&rt->lock, flags);
|
||||
tr = mtk_btag_curr_trace(rt);
|
||||
|
||||
if (!tr)
|
||||
goto out;
|
||||
|
||||
memset(tr, 0, sizeof(struct mtk_btag_trace));
|
||||
tr->pid = ctx->pid;
|
||||
tr->qid = ctx->qid;
|
||||
memcpy(&tr->throughput, &ctx->throughput,
|
||||
sizeof(struct mtk_btag_throughput));
|
||||
memcpy(&tr->workload, &ctx->workload, sizeof(struct mtk_btag_workload));
|
||||
|
||||
if (pid_ctx)
|
||||
mtk_btag_pidlog_eval(&tr->pidlog, &pid_ctx->pidlog);
|
||||
|
||||
mtk_btag_vmstat_eval(&tr->vmstat);
|
||||
mtk_btag_cpu_eval(&tr->cpu);
|
||||
|
||||
tr->time = sched_clock();
|
||||
|
||||
mtk_btag_klog(mtk_btag_mmc, tr);
|
||||
mtk_btag_next_trace(rt);
|
||||
out:
|
||||
spin_unlock_irqrestore(&rt->lock, flags);
|
||||
}
|
||||
|
||||
|
||||
static struct mt_bio_context_task *mt_bio_get_task(struct mt_bio_context *ctx,
|
||||
unsigned int task_id)
|
||||
{
|
||||
struct mt_bio_context_task *tsk;
|
||||
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
|
||||
if (task_id >= MMC_BIOLOG_CONTEXT_TASKS) {
|
||||
pr_notice("%s: invalid task id %d\n",
|
||||
__func__, task_id);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tsk = &ctx->task[task_id];
|
||||
tsk->task_id = task_id;
|
||||
|
||||
return tsk;
|
||||
}
|
||||
|
||||
static struct mt_bio_context_task *mt_bio_curr_task_by_ctx_id(
|
||||
unsigned int task_id,
|
||||
struct mt_bio_context **curr_ctx, int mt_ctx_map)
|
||||
{
|
||||
struct mt_bio_context *ctx;
|
||||
|
||||
if (mt_ctx_map == -1)
|
||||
/* get ctx by current pid */
|
||||
ctx = mt_bio_curr_ctx();
|
||||
else
|
||||
/* get ctx by ctx map id */
|
||||
ctx = mt_bio_get_ctx(mt_ctx_map);
|
||||
if (curr_ctx)
|
||||
*curr_ctx = ctx;
|
||||
return mt_bio_get_task(ctx, task_id);
|
||||
}
|
||||
|
||||
static struct mt_bio_context_task *mt_bio_curr_task(unsigned int task_id,
|
||||
struct mt_bio_context **curr_ctx)
|
||||
{
|
||||
/* get ctx by current pid */
|
||||
return mt_bio_curr_task_by_ctx_id(task_id, curr_ctx, -1);
|
||||
}
|
||||
|
||||
static const char *task_name[tsk_max] = {
|
||||
"req_start", "dma_start", "dma_end", "isdone_start", "isdone_end"};
|
||||
|
||||
static void mt_pr_cmdq_tsk(struct mt_bio_context_task *tsk, int stage)
|
||||
{
|
||||
int rw;
|
||||
int klogen = BTAG_KLOGEN(mtk_btag_mmc);
|
||||
__u32 bytes;
|
||||
char buf[256];
|
||||
|
||||
if (!((klogen == 2 && stage == tsk_isdone_end) || (klogen == 3)))
|
||||
return;
|
||||
|
||||
rw = tsk->arg & (1 << 30); /* write: 0, read: 1 */
|
||||
bytes = (tsk->arg & 0xFFFF) << SECTOR_SHIFT;
|
||||
|
||||
mtk_btag_task_timetag(buf, 256, stage, tsk_max, task_name, tsk->t,
|
||||
bytes);
|
||||
|
||||
pr_debug("[BLOCK_TAG] cmdq: tsk[%d],%d,%s,len=%d%s\n",
|
||||
tsk->task_id, stage+1, (rw)?"r":"w", bytes, buf);
|
||||
}
|
||||
|
||||
void mt_biolog_cmdq_check(void)
|
||||
{
|
||||
struct mt_bio_context *ctx;
|
||||
__u64 end_time, period_time;
|
||||
unsigned long flags;
|
||||
|
||||
ctx = mt_bio_curr_ctx();
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&ctx->lock, flags);
|
||||
end_time = sched_clock();
|
||||
period_time = end_time - ctx->period_start_t;
|
||||
|
||||
if (period_time >= MT_BIO_TRACE_LATENCY) {
|
||||
ctx->period_end_t = end_time;
|
||||
ctx->workload.period = period_time;
|
||||
mt_bio_context_eval(ctx);
|
||||
mt_bio_print_trace(ctx);
|
||||
ctx->period_start_t = end_time;
|
||||
ctx->period_end_t = 0;
|
||||
ctx->wl.period_busy = 0;
|
||||
ctx->wl.period_left_window_end_t = 0;
|
||||
ctx->wl.period_right_window_end_t = 0;
|
||||
ctx->wl.period_right_window_start_t = 0;
|
||||
memset(&ctx->throughput, 0, sizeof(struct mtk_btag_throughput));
|
||||
memset(&ctx->workload, 0, sizeof(struct mtk_btag_workload));
|
||||
}
|
||||
spin_unlock_irqrestore(&ctx->lock, flags);
|
||||
}
|
||||
|
||||
/* Command Queue Hook: stage1: queue task */
|
||||
void mt_biolog_cmdq_queue_task(unsigned int task_id, struct mmc_request *req)
|
||||
{
|
||||
struct mt_bio_context *ctx;
|
||||
struct mt_bio_context_task *tsk;
|
||||
int i;
|
||||
|
||||
if (!req)
|
||||
return;
|
||||
if (!req->sbc)
|
||||
return;
|
||||
|
||||
tsk = mt_bio_curr_task(task_id, &ctx);
|
||||
if (!tsk)
|
||||
return;
|
||||
|
||||
if (ctx->state == CMDQ_CTX_NOT_DMA)
|
||||
ctx->state = CMDQ_CTX_QUEUE;
|
||||
|
||||
tsk->arg = req->sbc->arg;
|
||||
tsk->t[tsk_req_start] = sched_clock();
|
||||
|
||||
ctx->q_depth++;
|
||||
mtk_btag_mictx_update_ctx(ctx->q_depth);
|
||||
|
||||
for (i = tsk_dma_start; i < tsk_max; i++)
|
||||
tsk->t[i] = 0;
|
||||
|
||||
if (!ctx->period_start_t)
|
||||
ctx->period_start_t = tsk->t[tsk_req_start];
|
||||
|
||||
mt_pr_cmdq_tsk(tsk, tsk_req_start);
|
||||
}
|
||||
|
||||
/*
 * Merge the busy interval [start, end] into the context's per-period
 * workload accounting.  Busy time is tracked as at most two windows:
 * a "left" window anchored at period_start_t, a floating "right"
 * window, and period_busy accumulating already-closed right windows.
 * Overlapping intervals are coalesced rather than double-counted.
 */
static void mt_bio_ctx_count_usage(struct mt_bio_context *ctx,
	__u64 start, __u64 end)
{
	if (start <= ctx->period_start_t) {
		/*
		 * if 'start' is located in previous period,
		 * reset right window and period_busy,
		 * and finally only left window will be existed.
		 */
		ctx->wl.period_left_window_end_t = end;
		ctx->wl.period_right_window_start_t =
			ctx->wl.period_right_window_end_t =
			ctx->wl.period_busy = 0;
	} else {
		/* if left window is existed */
		if (ctx->wl.period_left_window_end_t) {
			if (start < ctx->wl.period_left_window_end_t) {
				/*
				 * if 'start' is located inside left window,
				 * reset right window and period_busy,
				 * and finally only left window will be existed.
				 */
				ctx->wl.period_left_window_end_t = end;
				ctx->wl.period_right_window_start_t =
					ctx->wl.period_right_window_end_t =
					ctx->wl.period_busy = 0;
			} else
				goto new_window;
		} else
			goto new_window;
	}

	goto out;

new_window:
	/* 'start' lies beyond any left window: extend or open the right one. */
	if (ctx->wl.period_right_window_start_t) {
		if (start > ctx->wl.period_right_window_end_t) {
			/* Disjoint from the current right window: close it
			 * into period_busy and start a new one at 'start'.
			 */
			ctx->wl.period_busy +=
				(ctx->wl.period_right_window_end_t -
				ctx->wl.period_right_window_start_t);
			ctx->wl.period_right_window_start_t = start;
		}
		ctx->wl.period_right_window_end_t = end;
	} else {
		ctx->wl.period_right_window_start_t = start;
		ctx->wl.period_right_window_end_t = end;
	}

out:
	return;
}
|
||||
|
||||
static uint64_t mt_bio_get_period_busy(struct mt_bio_context *ctx)
|
||||
{
|
||||
uint64_t busy;
|
||||
|
||||
busy = ctx->wl.period_busy;
|
||||
|
||||
if (ctx->wl.period_left_window_end_t) {
|
||||
busy +=
|
||||
(ctx->wl.period_left_window_end_t -
|
||||
ctx->period_start_t);
|
||||
}
|
||||
|
||||
if (ctx->wl.period_right_window_start_t) {
|
||||
busy +=
|
||||
(ctx->wl.period_right_window_end_t -
|
||||
ctx->wl.period_right_window_start_t);
|
||||
}
|
||||
|
||||
return busy;
|
||||
}
|
||||
|
||||
/* Command Queue Hook: stage2: dma start */
|
||||
void mt_biolog_cmdq_dma_start(unsigned int task_id)
|
||||
{
|
||||
struct mt_bio_context_task *tsk;
|
||||
struct mt_bio_context *ctx;
|
||||
|
||||
tsk = mt_bio_curr_task(task_id, &ctx);
|
||||
if (!tsk)
|
||||
return;
|
||||
tsk->t[tsk_dma_start] = sched_clock();
|
||||
|
||||
/* count first queue task time in workload usage,
|
||||
* if it was not overlapped with DMA
|
||||
*/
|
||||
if (ctx->state == CMDQ_CTX_QUEUE)
|
||||
mt_bio_ctx_count_usage(ctx, tsk->t[tsk_req_start],
|
||||
tsk->t[tsk_dma_start]);
|
||||
ctx->state = CMDQ_CTX_IN_DMA;
|
||||
mt_pr_cmdq_tsk(tsk, tsk_dma_start);
|
||||
}
|
||||
|
||||
/* Command Queue Hook: stage3: dma end */
|
||||
void mt_biolog_cmdq_dma_end(unsigned int task_id)
|
||||
{
|
||||
struct mt_bio_context_task *tsk;
|
||||
struct mt_bio_context *ctx;
|
||||
|
||||
tsk = mt_bio_curr_task(task_id, &ctx);
|
||||
if (!tsk)
|
||||
return;
|
||||
tsk->t[tsk_dma_end] = sched_clock();
|
||||
ctx->state = CMDQ_CTX_NOT_DMA;
|
||||
mt_pr_cmdq_tsk(tsk, tsk_dma_end);
|
||||
}
|
||||
|
||||
/* Command Queue Hook: stage4: isdone start */
|
||||
void mt_biolog_cmdq_isdone_start(unsigned int task_id, struct mmc_request *req)
|
||||
{
|
||||
struct mt_bio_context_task *tsk;
|
||||
|
||||
tsk = mt_bio_curr_task(task_id, NULL);
|
||||
if (!tsk)
|
||||
return;
|
||||
tsk->t[tsk_isdone_start] = sched_clock();
|
||||
mt_pr_cmdq_tsk(tsk, tsk_isdone_start);
|
||||
}
|
||||
|
||||
/* Command Queue Hook: stage5: isdone end */
/*
 * Final stage of a cmdq task: stamp completion, decode the cached SBC
 * argument into direction/size, and fold the request's duration into
 * the per-context throughput and workload statistics before the task
 * slot is recycled.
 */
void mt_biolog_cmdq_isdone_end(unsigned int task_id)
{
	int write, i;
	__u32 bytes;
	__u64 end_time, busy_time;
	struct mt_bio_context *ctx;
	struct mt_bio_context_task *tsk;
	struct mtk_btag_throughput_rw *tp;

	tsk = mt_bio_curr_task(task_id, &ctx);
	if (!tsk)
		return;

	/* return if there's no on-going request */
	/* (all earlier stage timestamps must have been recorded) */
	for (i = 0; i < tsk_isdone_end; i++)
		if (!tsk->t[i])
			return;

	tsk->t[tsk_isdone_end] = end_time = sched_clock();

	ctx->q_depth--;
	mtk_btag_mictx_update_ctx(ctx->q_depth);

	/* throughput usage := duration of handling this request */

	/* tsk->arg & (1 << 30): 0 means write */
	write = tsk->arg & (1 << 30);
	write = (write) ? 0 : 1;

	/* low 16 bits of the SBC arg = block count; sectors -> bytes */
	bytes = tsk->arg & 0xFFFF;
	bytes = bytes << SECTOR_SHIFT;
	busy_time = end_time - tsk->t[tsk_req_start];
	tp = (write) ? &ctx->throughput.w : &ctx->throughput.r;
	tp->usage += busy_time;
	tp->size += bytes;

	mtk_btag_mictx_eval_tp(write, busy_time, bytes);

	/* workload statistics */
	ctx->workload.count++;

	/* count DMA time in workload usage */

	/* count isdone time in workload usage,
	 * if it was not overlapped with DMA
	 */
	if (ctx->state == CMDQ_CTX_NOT_DMA)
		mt_bio_ctx_count_usage(ctx, tsk->t[tsk_dma_start], end_time);
	else
		mt_bio_ctx_count_usage(ctx, tsk->t[tsk_dma_start],
			tsk->t[tsk_dma_end]);

	mt_pr_cmdq_tsk(tsk, tsk_isdone_end);

	/* re-init the slot so it reads as "no on-going request" */
	mt_bio_init_task(tsk);
}
|
||||
|
||||
/* CQHCI flavour of the periodic trace check; shares the cmdq path. */
void mt_biolog_cqhci_check(void)
{
	mt_biolog_cmdq_check();
}
|
||||
|
||||
/*
 * CQHCI queue hook: record the doorbell time of @task_id in the
 * dedicated CTX_MMCCMDQD0 context.  The request's direction and block
 * count are normalized into the same "legacy sbc arg" layout the cmdq
 * path uses (bit 30 = direction, low 16 bits = block count), so the
 * completion-side decoding is shared.
 */
void mt_biolog_cqhci_queue_task(unsigned int task_id, struct mmc_request *req)
{
	struct mt_bio_context *ctx;
	struct mt_bio_context_task *tsk;
	unsigned long flags;

	if (!req)
		return;

	tsk = mt_bio_curr_task_by_ctx_id(task_id,
		&ctx, CTX_MMCCMDQD0);
	if (!tsk)
		return;

	spin_lock_irqsave(&ctx->lock, flags);

#ifdef CONFIG_MTK_EMMC_HW_CQ
	/* convert cqhci to legacy sbc arg */
	tsk->arg = (!!(req->cmdq_req->cmdq_req_flags & DIR)) << 30 |
		(req->cmdq_req->data.blocks & 0xFFFF);
#else
	if (req->sbc)
		tsk->arg = req->sbc->arg;
#endif

	tsk->t[tsk_req_start] = sched_clock();

	ctx->q_depth++;
	mtk_btag_mictx_update_ctx(ctx->q_depth);

	/* Lazily open a statistics period on first activity. */
	if (!ctx->period_start_t)
		ctx->period_start_t = tsk->t[tsk_req_start];

	spin_unlock_irqrestore(&ctx->lock, flags);

	mt_pr_cmdq_tsk(tsk, tsk_req_start);
}
|
||||
|
||||
/*
 * CQHCI completion hook: stamp the completion time of @task_id, decode
 * the cached arg into direction/size, and fold the doorbell-to-complete
 * duration into throughput and workload statistics.  Everything runs
 * under ctx->lock since completions can race with queueing.
 */
void mt_biolog_cqhci_complete(unsigned int task_id)
{
	int write;
	__u32 bytes;
	__u64 end_time, busy_time;
	struct mt_bio_context *ctx;
	struct mt_bio_context_task *tsk;
	struct mtk_btag_throughput_rw *tp;
	unsigned long flags;

	tsk = mt_bio_curr_task_by_ctx_id(task_id,
		&ctx, CTX_MMCCMDQD0);
	if (!tsk)
		return;

	spin_lock_irqsave(&ctx->lock, flags);

	tsk->t[tsk_isdone_end] = end_time = sched_clock();

	ctx->q_depth--;
	mtk_btag_mictx_update_ctx(ctx->q_depth);

	/* throughput usage := duration of handling this request */

	/* tsk->arg & (1 << 30): 0 means write */
	write = tsk->arg & (1 << 30);
	write = (write) ? 0 : 1;

	/* low 16 bits = block count; sectors -> bytes */
	bytes = tsk->arg & 0xFFFF;
	bytes = bytes << SECTOR_SHIFT;
	busy_time = end_time - tsk->t[tsk_req_start];

	tp = (write) ? &ctx->throughput.w : &ctx->throughput.r;
	tp->usage += busy_time;
	tp->size += bytes;

	mtk_btag_mictx_eval_tp(write, busy_time, bytes);

	/* workload statistics */
	ctx->workload.count++;

	/* count doorbell to complete time in workload usage */
	mt_bio_ctx_count_usage(ctx, tsk->t[tsk_req_start], end_time);

	mt_pr_cmdq_tsk(tsk, tsk_isdone_end);

	/* re-init the slot so it reads as "no on-going request" */
	mt_bio_init_task(tsk);
	spin_unlock_irqrestore(&ctx->lock, flags);
}
|
||||
|
||||
/* MMC Queue Hook: check function at mmc_blk_issue_rw_rq() */
/*
 * Periodic checkpoint for the mmcqd (legacy, non-cmdq) context: dump
 * and reset the per-period statistics once MT_BIO_TRACE_LATENCY has
 * elapsed, same scheme as mt_biolog_cmdq_check().
 * NOTE(review): unlike the cmdq variant, no ctx->lock is taken here —
 * presumably the mmcqd context is only touched from its own queue
 * thread; confirm before adding concurrent callers.
 */
void mt_biolog_mmcqd_req_check(void)
{
	struct mt_bio_context *ctx;
	__u64 end_time, period_time;

	ctx = mt_bio_curr_ctx();
	if (!ctx)
		return;

	/* skip mmcqd0, if command queue is applied */
	if ((ctx->id == CTX_MMCQD0) && (ctx->state == MMCQD_CMDQ_MODE_EN))
		return;

	end_time = sched_clock();
	period_time = end_time - ctx->period_start_t;

	if (period_time >= MT_BIO_TRACE_LATENCY) {
		ctx->period_end_t = end_time;
		ctx->workload.period = period_time;
		mt_bio_context_eval(ctx);
		mt_bio_print_trace(ctx);
		/* Start a new period anchored at the dump time. */
		ctx->period_start_t = end_time;
		ctx->period_end_t = 0;
		ctx->wl.period_busy = 0;
		ctx->wl.period_left_window_end_t = 0;
		ctx->wl.period_right_window_end_t = 0;
		ctx->wl.period_right_window_start_t = 0;
		memset(&ctx->throughput, 0, sizeof(struct mtk_btag_throughput));
		memset(&ctx->workload, 0, sizeof(struct mtk_btag_workload));
	}
}
|
||||
|
||||
/* MMC Queue Hook: request start function at mmc_start_req() */
/*
 * Stamp the start of a legacy mmcqd request (task slot 0) and, when
 * eMMC software command queueing is built in, latch the context into
 * CMDQ mode the first time cmdq is seen enabled on the card.
 * NOTE(review): host->card is dereferenced without a NULL check in the
 * CONFIG_MTK_EMMC_CQ_SUPPORT branch — presumably a card is always
 * present on this path; confirm against callers.
 */
void mt_biolog_mmcqd_req_start(struct mmc_host *host)
{
	struct mt_bio_context *ctx;
	struct mt_bio_context_task *tsk;

	tsk = mt_bio_curr_task(0, &ctx);
	if (!tsk)
		return;
	tsk->t[tsk_req_start] = sched_clock();

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	if ((ctx->id == CTX_MMCQD0) &&
		(ctx->state == MMCQD_NORMAL) &&
		host->card->ext_csd.cmdq_en)
		ctx->state = MMCQD_CMDQ_MODE_EN;

	/*
	 * CMDQ mode, embedded eMMC mictx will be
	 * updated in mt_biolog_cmdq_*, so bypass it here.
	 */
#else
	/* Legacy mode. Update mictx for embedded eMMC only */
	if (ctx->qid == BTAG_STORAGE_EMBEDDED)
		mtk_btag_mictx_update_ctx(1);
#endif
}
|
||||
|
||||
/* MMC Queue Hook: request end function at mmc_start_req() */
/*
 * Complete a legacy mmcqd request: derive direction and size from
 * @data, fold the request duration into workload and throughput
 * statistics, and recycle task slot 0.
 * NOTE(review): data->flags is compared with == rather than a bitmask
 * test — assumes MMC_DATA_WRITE/READ arrive unaccompanied by other
 * flag bits on this path; confirm against the mmc core.
 */
void mt_biolog_mmcqd_req_end(struct mmc_data *data)
{
	int rw;
	__u32 size;
	struct mt_bio_context *ctx;
	struct mt_bio_context_task *tsk;
	struct mtk_btag_throughput_rw *tp;
	__u64 end_time, busy_time;

	end_time = sched_clock();

	if (!data)
		return;

	/* write: 0, read: 1; anything else is not a data transfer */
	if (data->flags == MMC_DATA_WRITE)
		rw = 0;
	else if (data->flags == MMC_DATA_READ)
		rw = 1;
	else
		return;

	tsk = mt_bio_curr_task(0, &ctx);
	if (!tsk)
		return;

	/* return if there's no on-going request */
	if (!tsk->t[tsk_req_start])
		return;

	size = (data->blocks * data->blksz);
	busy_time = end_time - tsk->t[tsk_req_start];

	/* workload statistics */
	ctx->workload.count++;

	/* count request handling time in workload usage */
	mt_bio_ctx_count_usage(ctx, tsk->t[tsk_req_start], end_time);

	/* throughput statistics */
	/* write: 0, read: 1 */
	tp = (rw) ? &ctx->throughput.r : &ctx->throughput.w;
	tp->usage += busy_time;
	tp->size += size;

	/* update mictx for embedded eMMC only */
	if (ctx->qid == BTAG_STORAGE_EMBEDDED) {
		/* mictx takes a "write" flag, hence the inversion of rw */
		mtk_btag_mictx_eval_tp(!rw, busy_time, size);
		mtk_btag_mictx_update_ctx(0);
	}

	/* re-init task to indicate no on-going request */
	mt_bio_init_task(tsk);
}
|
||||
|
||||
/*
 * Print @fmt to an optional bounded buffer (*buff/*size), an optional
 * seq_file, or - when both are absent - the kernel log.
 *
 * snprintf may return a value of size or "more" to indicate
 * that the output was truncated, thus be careful of "more"
 * case.  It may also return a negative value on output error, so the
 * result must be kept signed: storing it in an unsigned type would make
 * a negative return pass the "> 0" check and corrupt the buffer/size
 * accounting.
 */
#define SPREAD_PRINTF(buff, size, evt, fmt, args...) \
do { \
	if (buff && size && *(size)) { \
		int var = snprintf(*(buff), *(size), \
				   fmt, ##args); \
		if (var > 0) { \
			/* clamp truncated output to the space we had */ \
			if ((unsigned long)var > *(size)) \
				var = (int)*(size); \
			*(size) -= (unsigned long)var; \
			*(buff) += var; \
		} \
	} \
	if (evt) \
		seq_printf(evt, fmt, ##args); \
	if (!buff && !evt) { \
		pr_info(fmt, ##args); \
	} \
} while (0)
|
||||
|
||||
/*
 * Dump every in-use trace context (id, mapped context, thread name,
 * pid, queue id) via SPREAD_PRINTF, which routes to the given buffer,
 * seq_file, or kernel log.  Always returns 0 (no bytes-written count
 * is reported to the caller).
 */
static size_t mt_bio_seq_debug_show_info(char **buff, unsigned long *size,
	struct seq_file *seq)
{
	int i;
	struct mt_bio_context *ctx = BTAG_CTX(mtk_btag_mmc);

	if (!ctx)
		return 0;

	for (i = 0; i < MMC_BIOLOG_CONTEXTS; i++) {
		/* pid == 0 marks an unused context slot */
		if (ctx[i].pid == 0)
			continue;
		SPREAD_PRINTF(buff, size, seq,
			"ctx[%d]=ctx_map[%d]=%s,pid:%4d,q:%d\n",
			i,
			ctx[i].id,
			ctx[i].comm,
			ctx[i].pid,
			ctx[i].qid);
	}

	return 0;
}
|
||||
|
||||
int mt_mmc_biolog_init(void)
|
||||
{
|
||||
struct mtk_blocktag *btag;
|
||||
|
||||
btag = mtk_btag_alloc("mmc",
|
||||
MMC_BIOLOG_RINGBUF_MAX,
|
||||
sizeof(struct mt_bio_context),
|
||||
MMC_BIOLOG_CONTEXTS,
|
||||
mt_bio_seq_debug_show_info);
|
||||
|
||||
if (btag)
|
||||
mtk_btag_mmc = btag;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt_mmc_biolog_init);
|
||||
|
||||
int mt_mmc_biolog_exit(void)
|
||||
{
|
||||
mtk_btag_free(mtk_btag_mmc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Mediatek MMC Block IO Log");
|
||||
|
||||
108
drivers/mmc/core/mtk_mmc_block.h
Normal file
108
drivers/mmc/core/mtk_mmc_block.h
Normal file
@@ -0,0 +1,108 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2019 MediaTek Inc.
 */

/*
 * MediaTek MMC block IO trace ("blocktag") hooks.
 * Real implementations live behind CONFIG_MMC_BLOCK_IO_LOG; otherwise
 * every hook compiles away to nothing via the stub macros below.
 */
#ifndef _MT_MMC_BLOCK_H
#define _MT_MMC_BLOCK_H

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include "../../misc/mediatek/include/mt-plat/mtk_blocktag.h"

#if defined(CONFIG_MMC_BLOCK_IO_LOG)

int mt_mmc_biolog_init(void);
int mt_mmc_biolog_exit(void);

/* Per-request-queue trace context lifecycle. */
void mt_bio_queue_alloc(struct task_struct *thread, struct request_queue *q);
void mt_bio_queue_free(struct task_struct *thread);

/* Legacy mmcqd (single outstanding request) hooks. */
void mt_biolog_mmcqd_req_check(void);
void mt_biolog_mmcqd_req_start(struct mmc_host *host);
void mt_biolog_mmcqd_req_end(struct mmc_data *data);

/* eMMC software command-queue hooks, one per request stage. */
void mt_biolog_cmdq_check(void);
void mt_biolog_cmdq_queue_task(unsigned int task_id, struct mmc_request *req);
void mt_biolog_cmdq_dma_start(unsigned int task_id);
void mt_biolog_cmdq_dma_end(unsigned int task_id);
void mt_biolog_cmdq_isdone_start(unsigned int task_id, struct mmc_request *req);
void mt_biolog_cmdq_isdone_end(unsigned int task_id);

/* CQHCI (hardware command-queue) hooks. */
void mt_biolog_cqhci_check(void);
void mt_biolog_cqhci_queue_task(unsigned int task_id, struct mmc_request *req);
void mt_biolog_cqhci_complete(unsigned int task_id);

#define MMC_BIOLOG_RINGBUF_MAX 120
#define MMC_BIOLOG_CONTEXTS 10 /* number of request queues */
#define MMC_BIOLOG_CONTEXT_TASKS 32 /* number concurrent tasks in cmdq */

/* Timestamped stages a traced task passes through (indexes into t[]). */
enum {
	tsk_req_start = 0,
	tsk_dma_start,
	tsk_dma_end,
	tsk_isdone_start,
	tsk_isdone_end,
	tsk_max
};

/* One in-flight cmdq/cqhci task slot. */
struct mt_bio_context_task {
	int task_id;
	u32 arg;            /* cached SBC arg: bit 30 dir, low 16 bits blocks */
	uint64_t t[tsk_max]; /* sched_clock() stamp per stage; 0 = not reached */
};

/* Per-period busy-time windows (see mt_bio_ctx_count_usage()). */
struct mt_bio_context_wl {
	uint64_t period_busy;                /* sum of closed right windows */
	uint64_t period_left_window_end_t;   /* window anchored at period start */
	uint64_t period_right_window_start_t;
	uint64_t period_right_window_end_t;
};

/* Context of Request Queue */
struct mt_bio_context {
	int id;
	int state;
	pid_t pid;                 /* owning thread; 0 = slot unused */
	struct request_queue *q;
	char comm[TASK_COMM_LEN];
	u16 qid;
	u16 q_depth;               /* currently outstanding tasks */
	spinlock_t lock;
	uint64_t period_start_t;
	uint64_t period_end_t;
	struct mt_bio_context_wl wl;
	struct mt_bio_context_task task[MMC_BIOLOG_CONTEXT_TASKS];
	struct mtk_btag_workload workload;
	struct mtk_btag_throughput throughput;
	struct mtk_btag_pidlogger pidlog;
};

#else

/*
 * Stubs when IO logging is disabled: each hook expands to nothing.
 * NOTE(review): these are expression-less function-like macros, so
 * callers must not use the return value of the *_init/_exit stubs.
 */
#define mt_mmc_biolog_init(...)
#define mt_mmc_biolog_exit(...)

#define mt_bio_queue_alloc(...)
#define mt_bio_queue_free(...)

#define mt_biolog_mmcqd_req_check(...)
#define mt_biolog_mmcqd_req_start(...)
#define mt_biolog_mmcqd_req_end(...)

#define mt_biolog_cmdq_check(...)
#define mt_biolog_cmdq_queue_task(...)
#define mt_biolog_cmdq_dma_start(...)
#define mt_biolog_cmdq_dma_end(...)
#define mt_biolog_cmdq_isdone_start(...)
#define mt_biolog_cmdq_isdone_end(...)

#define mt_biolog_cqhci_check(...)
#define mt_biolog_cqhci_queue_task(...)
#define mt_biolog_cqhci_complete(...)

#endif

#endif
|
||||
@@ -24,6 +24,7 @@
|
||||
#include "card.h"
|
||||
#include "host.h"
|
||||
#include "mmc-crypto.h"
|
||||
#include "mtk_mmc_block.h"
|
||||
|
||||
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
|
||||
{
|
||||
@@ -288,6 +289,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
break;
|
||||
}
|
||||
|
||||
mt_biolog_mmcqd_req_check();
|
||||
/* Parallel dispatch of requests is not supported at the moment */
|
||||
mq->busy = true;
|
||||
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/mm_inline.h>
|
||||
#include <trace/events/writeback.h>
|
||||
#include <mt-plat/mtk_blocktag.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
@@ -2427,6 +2428,13 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
|
||||
task_io_account_write(PAGE_SIZE);
|
||||
current->nr_dirtied++;
|
||||
this_cpu_inc(bdp_ratelimits);
|
||||
|
||||
/*
|
||||
* Dirty pages may be written by writeback thread later.
|
||||
* To get real i/o owner of this page, we shall keep it
|
||||
* before writeback takes over.
|
||||
*/
|
||||
mtk_btag_pidlog_set_pid(page);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(account_page_dirtied);
|
||||
|
||||
Reference in New Issue
Block a user