Kalesh reported mm_event shows 5~8% regression in pft test of pms.
Most of the overhead came from ktime_get() when I investigated it.
Let's use jiffies instead of ktime_get.
The side effect is that jiffies doesn't provide as high-resolution time as
ktime_get, so avg_latency is more likely to end up as zero and the reported
max latency is no longer exact, either. However, the goal is not to provide
*exact* latency but to notice that some events were too slow at that time.
Thus, if the slow events keep happening within the period, the stats
will still represent the *trend*.
<idle>-0 [002] d.h2 696.836558: mm_event_record: ufs_read_send_cmd count=2 avg_lat=6666 max_lat=6666
<idle>-0 [002] d.h2 696.836559: mm_event_record: f2fs_read_data count=49 avg_lat=3333 max_lat=33333
<...>-27 [002] ..s. 696.836852: mm_event_record: f2fs_read_data count=6 avg_lat=6666 max_lat=10000
LightweightExec-25872 [000] .... 696.838052: mm_event_record: min_flt count=3 avg_lat=0 max_lat=3333
<...>-28336 [000] .... 696.843788: mm_event_record: min_flt count=1 avg_lat=0 max_lat=0
In coral, this patch reduces the overhead from 7% to 2% in the pft
benchmark.
fault/sec stddev overhead
mm_event_disable 451513.86 2.63%
mm_event_enable 419609.68 2.37% 7.07%
mm_event_ktime_improve 443256.45 2.68% 1.83%
Bug: 169113282
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: If058eff7d502c98286d103ab2937115d7dc63b90
59 lines
1.5 KiB
C
59 lines
1.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_MM_EVENT_H
|
|
#define _LINUX_MM_EVENT_H
|
|
|
|
/*
|
|
* These enums are exposed to userspace via the ftrace trace_pipe_raw endpoint
|
|
* and are an ABI. Userspace tools depend on them.
|
|
*/
|
|
enum mm_event_type {
|
|
MM_MIN_FAULT = 0,
|
|
MM_MAJ_FAULT = 1,
|
|
MM_READ_IO = 2,
|
|
MM_COMPACTION = 3,
|
|
MM_RECLAIM = 4,
|
|
MM_SWP_FAULT = 5,
|
|
MM_KERN_ALLOC = 6,
|
|
BLK_READ_SUBMIT_BIO = 7,
|
|
UFS_READ_QUEUE_CMD = 8,
|
|
UFS_READ_SEND_CMD = 9,
|
|
UFS_READ_COMPL_CMD = 10,
|
|
F2FS_READ_DATA = 11,
|
|
MM_TYPE_NUM = 12,
|
|
};
|
|
|
|
struct mm_event_task {
|
|
unsigned int count;
|
|
unsigned int max_lat;
|
|
u64 accm_lat;
|
|
} __attribute__ ((packed));
|
|
|
|
/*
 * Snapshot of memory/vm counters captured alongside mm_event stats.
 * Field meanings follow the usual vmstat naming — NOTE(review): units
 * (pages vs. bytes) are established by the producer, not visible here.
 */
struct mm_event_vmstat {
	unsigned long free;		/* free memory */
	unsigned long file;		/* file-backed pages */
	unsigned long anon;		/* anonymous pages */
	unsigned long ion;		/* ION buffer usage */
	unsigned long slab;		/* slab usage */
	unsigned long ws_refault;	/* workingset refaults */
	unsigned long ws_activate;	/* workingset activations */
	unsigned long mapped;		/* mapped pages */
	unsigned long pgin;		/* pages paged in */
	unsigned long pgout;		/* pages paged out */
	unsigned long swpin;		/* swap-ins */
	unsigned long swpout;		/* swap-outs */
	unsigned long reclaim_steal;	/* pages stolen by reclaim */
	unsigned long reclaim_scan;	/* pages scanned by reclaim */
	unsigned long compact_scan;	/* pages scanned by compaction */
};
|
|
|
|
#ifdef CONFIG_MM_EVENT_STAT
|
|
void mm_event_task_init(struct task_struct *tsk);
|
|
void mm_event_record(enum mm_event_type event, unsigned long s_jiffies);
|
|
void mm_event_count(enum mm_event_type event, int count);
|
|
#else
|
|
static inline void mm_event_task_init(struct task_struct *tsk) {}
|
|
static inline void mm_event_record(enum mm_event_type event, unsigned long s_jiffies) {}
|
|
static inline void mm_event_count(enum mm_event_type event, int count) {}
|
|
#endif /* _LINUX_MM_EVENT_H */
|
|
#endif
|