[ALPS04355347] lockdep: enhance lock debugging function

1. verify lockdep lifetime
2. dump held locks when rcu stalls on CPU/task
3. adjust aee_dump position
4. improve lockdep selftest cases
5. print warning message to trace when printk is unusable
6. skip UBSAN checking in lockdep.c
7. add false alarm cases to filter

MTK-Commit-Id: a610ff8aa5a0d25a119898ceb69aae4cbb1213f6

Change-Id: Ieb7054ef94676f3b4d77c3c00706671d4e238826
CR-Id: ALPS04355347
Feature: [Module]Lockdep
Signed-off-by: Kobe Wu <kobe-cp.wu@mediatek.com>
This commit is contained in:
Kobe Wu
2020-03-03 02:10:06 +08:00
committed by Ladios Chang (張竣隆)
parent d3993a2e25
commit d31acb9fcc
10 changed files with 632 additions and 138 deletions

View File

@@ -752,6 +752,7 @@ do { \
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
extern unsigned long long debug_locks_off_ts;
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -759,4 +760,18 @@ lockdep_rcu_suspicious(const char *file, const int line, const char *s)
}
#endif
#ifdef CONFIG_PROVE_LOCKING
void check_held_locks(int force);
void mt_aee_dump_held_locks(void);
#else
static inline void
check_held_locks(int force)
{
}
static inline void
mt_aee_dump_held_locks(void)
{
}
#endif
#endif /* __LINUX_LOCKDEP_H */

View File

@@ -80,6 +80,23 @@ DEFINE_EVENT(lock, lock_acquired,
#endif
TRACE_EVENT(lock_dbg,
TP_PROTO(const char *buf),
TP_ARGS(buf),
TP_STRUCT__entry(
__string(mbuf, buf)
),
TP_fast_assign(
__assign_str(mbuf, buf);
),
TP_printk("%s", __get_str(mbuf))
);
TRACE_EVENT(lock_monitor_msg,
TP_PROTO(const char *buf),

View File

@@ -2,6 +2,7 @@
# Any varying coverage in these files is non-deterministic
# and is generally not a function of system call inputs.
KCOV_INSTRUMENT := n
UBSAN_SANITIZE_lockdep.o := n
ccflags-y += -I$(srctree)/drivers/misc/mediatek/include
subdir-ccflags-y += -I$(srctree)/kernel/sched/
obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
@@ -15,12 +16,12 @@ endif
ifeq ($(CONFIG_PROVE_LOCKING),y)
ccflags-y += -DMTK_LOCK_DEBUG
ccflags-y += -DMTK_LOCK_DEBUG_NEW_DEPENDENCY
ccflags-y += -DMTK_LOCK_MONITOR
endif
ifeq ($(CONFIG_LOCKDEP),y)
ccflags-y += -DMTK_ENHANCE_LOCKDEP_PROC
ccflags-y += -DMTK_LOCK_DEBUG_NEW_DEPENDENCY
ccflags-y += -DMTK_LOCK_MONITOR
endif
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o

View File

@@ -70,6 +70,10 @@
#include <linux/slab.h>
#endif
#ifdef CONFIG_MTK_RAM_CONSOLE
#include <mt-plat/mtk_ram_console.h>
#endif
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
@@ -85,7 +89,7 @@ module_param(lock_stat, int, 0644);
#endif
#ifdef MTK_LOCK_DEBUG_NEW_DEPENDENCY
#define MTK_LOCK_DEBUG_NEW_DEPENDENCY_AEE
//#define MTK_LOCK_DEBUG_NEW_DEPENDENCY_AEE
/*
* This is a debug function to catch the lock dependency at runtime.
* This helps to catch the first time a lock dependency appears
@@ -129,13 +133,41 @@ module_param(lock_stat, int, 0644);
*/
#define PREV_LOCK_NAME "&(&lockB)->rlock"
#define NEXT_LOCK_NAME "&(&lockC)->rlock"
#define LOCK_DEPTH 2
#define LOCK_DEPTH 0
#endif
#ifdef MTK_LOCK_MONITOR
#define MAX_LOCK_NAME 128
static noinline int trace_circular_bug(
struct lock_list *this,
struct lock_list *target,
struct held_lock *check_src,
struct held_lock *check_tgt,
struct stack_trace *trace);
/*
 * Check whether @curr currently holds a lock used by the printk/console
 * path.  While such a lock is held, reporting through printk could
 * deadlock, so callers divert their lockdep report to ftrace instead.
 */
static bool is_log_lock_held(struct task_struct *curr)
{
	struct held_lock *held = curr->held_locks;
	int idx;

	for (idx = 0; idx < curr->lockdep_depth; idx++) {
		const char *lk_name = held[idx].instance->name;

		/* lock taken by pr_xxx */
		if (strcmp(lk_name, "logbuf_lock") == 0)
			return true;
		/* lock taken by the console/serial driver */
		if (strcmp(lk_name, "&(&port->lock)->rlock") == 0)
			return true;
	}
	return false;
}
#ifdef MTK_LOCK_MONITOR
static char buf_lock[256];
static unsigned int lock_mon_enable = 1;
static unsigned int lock_mon_1st_th_ms = 2000; /* show name */
static unsigned int lock_mon_2nd_th_ms = 5000; /* show backtrace */
static unsigned int lock_mon_3rd_th_ms = 15000; /* trigger Kernel API dump */
@@ -152,6 +184,7 @@ static const char * const held_lock_white_list[] = {
"&of->mutex",
"&epfile->mutex"
};
#endif
static unsigned long long sec_high(unsigned long long nsec)
{
@@ -164,18 +197,34 @@ static unsigned long sec_low(unsigned long long nsec)
/* exclude part of nsec */
return do_div(nsec, 1000000000)/1000;
}
#endif
#define TO_KERNEL_LOG 0x1
#define TO_FTRACE 0x2
#define TO_SRAM 0x4
#define TO_BOTH (TO_KERNEL_LOG|TO_FTRACE)
/*
 * printf-style helper: format a message and emit it to the lock_dbg
 * trace event only (never to printk), so it stays safe to call while
 * printk-related locks are held.
 */
static void lock_dbg(const char *msg, ...)
{
	va_list ap;
	char line[256];

	va_start(ap, msg);
	vsnprintf(line, sizeof(line), msg, ap);
	va_end(ap);

	trace_lock_dbg(line);
}
/*
 * Route a lock-monitor message to the requested sink(s): ftrace,
 * the kernel log, and/or the AEE SRAM console (when available).
 */
static void lock_mon_msg(char *msg, int sinks)
{
	if (sinks & TO_FTRACE)
		trace_lock_monitor_msg(msg);
	if (sinks & TO_KERNEL_LOG)
		pr_info("%s\n", msg);
#ifdef CONFIG_MTK_RAM_CONSOLE
	if (sinks & TO_SRAM)
		aee_sram_fiq_log(msg);
#endif
}
static void lockdep_aee(void)
@@ -551,9 +600,9 @@ static int save_trace(struct stack_trace *trace)
if (!debug_locks_off_graph_unlock())
return 0;
lockdep_aee();
print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
dump_stack();
lockdep_aee();
return 0;
}
@@ -717,8 +766,9 @@ static void print_lock(struct held_lock *hlock)
}
print_lock_name(lock_classes + class_idx - 1);
printk(KERN_CONT ", at: [<%p>] %pS\n",
(void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
pr_cont(", at: [<%p>] %pS from [%lld.%06lu]\n",
(void *)hlock->acquire_ip, (void *)hlock->acquire_ip,
sec_high(hlock->timestamp), sec_low(hlock->timestamp));
}
/* MTK_LOCK_DEBUG_HELD_LOCK */
@@ -728,7 +778,12 @@ void held_lock_save_trace(struct stack_trace *trace, unsigned long *entries)
trace->max_entries = HELD_LOCK_STACK_TRACE_DEPTH;
trace->entries = entries;
trace->skip = 3;
save_stack_trace(trace);
if (trace->nr_entries != 0 &&
trace->entries[trace->nr_entries-1] == ULONG_MAX)
trace->nr_entries--;
}
void held_lock_show_trace(struct stack_trace *trace, int output)
@@ -740,8 +795,9 @@ void held_lock_show_trace(struct stack_trace *trace, int output)
return;
for (i = 0; i < trace->nr_entries; i++) {
snprintf(buf, sizeof(buf), "%*c%pS", 6, ' ',
(void *)trace->entries[i]);
snprintf(buf, sizeof(buf), "%*c%pS%s", 6, ' ',
(void *)trace->entries[i],
(output == TO_SRAM) ? "\n" : "");
lock_mon_msg(buf, output);
}
}
@@ -1046,9 +1102,10 @@ static struct lock_list *alloc_list_entry(void)
if (!debug_locks_off_graph_unlock())
return NULL;
lockdep_aee();
print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
dump_stack();
lockdep_aee();
return NULL;
}
return list_entries + nr_list_entries++;
@@ -1377,8 +1434,6 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
if (debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("======================================================\n");
pr_warn("WARNING: possible circular locking dependency detected\n");
@@ -1399,6 +1454,8 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
print_circular_bug_entry(entry, depth);
lockdep_aee();
return 0;
}
@@ -1418,6 +1475,11 @@ static noinline int print_circular_bug(struct lock_list *this,
struct lock_list *first_parent;
int depth;
if (is_log_lock_held(current)) {
trace_circular_bug(this, target, check_src, check_tgt, trace);
return 0;
}
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
@@ -1738,8 +1800,6 @@ print_bad_irq_dependency(struct task_struct *curr,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("=====================================================\n");
pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
@@ -1795,6 +1855,7 @@ print_bad_irq_dependency(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
return 0;
}
@@ -1969,8 +2030,6 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("============================================\n");
pr_warn("WARNING: possible recursive locking detected\n");
@@ -1988,6 +2047,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
return 0;
}
@@ -2169,11 +2229,12 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
/* We drop graph lock, so another thread can overwrite trace. */
graph_unlock();
pr_info("\n new dependency: ");
pr_cont("\n new dependency:");
print_lock_name(hlock_class(prev));
pr_info(" =>");
pr_cont(" =>");
print_lock_name(hlock_class(next));
pr_info("\n");
pr_cont("\n");
dump_stack();
lockdep_print_held_locks(current);
@@ -2776,8 +2837,6 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("================================\n");
pr_warn("WARNING: inconsistent lock state\n");
@@ -2806,6 +2865,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
return 0;
}
@@ -2843,8 +2903,6 @@ print_irq_inversion_bug(struct task_struct *curr,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("========================================================\n");
pr_warn("WARNING: possible irq lock inversion dependency detected\n");
@@ -2889,6 +2947,7 @@ print_irq_inversion_bug(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
return 0;
}
@@ -3538,8 +3597,6 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
if (debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("==================================\n");
pr_warn("WARNING: Nested lock was not taken\n");
@@ -3560,6 +3617,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
return 0;
}
@@ -3769,8 +3827,6 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
if (debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("=====================================\n");
pr_warn("WARNING: bad unlock balance detected!\n");
@@ -3787,6 +3843,7 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
return 0;
}
@@ -3991,8 +4048,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
* So we're all set to release this lock.. wait what lock? We don't
* own any locks, you've been drinking again?
*/
if (DEBUG_LOCKS_WARN_ON(depth <= 0))
return print_unlock_imbalance_bug(curr, lock, ip);
if (depth <= 0)
return print_unlock_imbalance_bug(curr, lock, ip);
/*
* Check whether the lock exists in the current stack
@@ -4342,8 +4399,6 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
if (debug_locks_silent)
return 0;
lockdep_aee();
pr_warn("\n");
pr_warn("=================================\n");
pr_warn("WARNING: bad contention detected!\n");
@@ -4360,6 +4415,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
return 0;
}
@@ -4688,8 +4744,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
if (debug_locks_silent)
return;
lockdep_aee();
pr_warn("\n");
pr_warn("=========================\n");
pr_warn("WARNING: held lock freed!\n");
@@ -4702,6 +4756,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
}
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
@@ -4748,8 +4803,6 @@ static void print_held_locks_bug(void)
if (debug_locks_silent)
return;
lockdep_aee();
pr_warn("\n");
pr_warn("====================================\n");
pr_warn("WARNING: %s/%d still has locks held!\n",
@@ -4759,6 +4812,7 @@ static void print_held_locks_bug(void)
lockdep_print_held_locks(current);
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
}
void debug_check_no_locks_held(void)
@@ -4853,8 +4907,6 @@ asmlinkage __visible void lockdep_sys_exit(void)
if (!debug_locks_off())
return;
lockdep_aee();
pr_warn("\n");
pr_warn("================================================\n");
pr_warn("WARNING: lock held when returning to user space!\n");
@@ -4863,6 +4915,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
pr_warn("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
lockdep_print_held_locks(curr);
lockdep_aee();
}
/*
@@ -4879,8 +4932,6 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
if (!debug_locks_off())
return;
lockdep_aee();
/* Note: the following can be executed concurrently, so be careful. */
pr_warn("\n");
pr_warn("=============================\n");
@@ -4923,6 +4974,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
lockdep_aee();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
@@ -5412,7 +5464,6 @@ void lockdep_free_task(struct task_struct *task)
}
#endif
#ifdef MTK_LOCK_MONITOR
static void get_lock_name(struct lock_class *class, char name[MAX_LOCK_NAME])
{
char str[KSYM_NAME_LEN];
@@ -5434,7 +5485,9 @@ static void get_lock_name(struct lock_class *class, char name[MAX_LOCK_NAME])
}
}
static void lockdep_check_held_locks(struct task_struct *curr, bool en)
#ifdef MTK_LOCK_MONITOR
static void lockdep_check_held_locks(
struct task_struct *curr, bool en, bool aee)
{
int i;
struct held_lock *hlock;
@@ -5451,15 +5504,19 @@ static void lockdep_check_held_locks(struct task_struct *curr, bool en)
continue;
timestamp_bak = hlock->timestamp;
t_diff = sched_clock() - hlock->timestamp;
t_diff = sched_clock() - timestamp_bak;
do_div(t_diff, 1000000);
if (t_diff > lock_mon_1st_th_ms) {
if (hlock->timestamp == 0 ||
hlock->timestamp != timestamp_bak)
continue;
if (t_diff > lock_mon_1st_th_ms || aee) {
struct lock_class *class;
char name[MAX_LOCK_NAME];
unsigned int class_idx = hlock->class_idx;
bool skip_this = 0;
int j, output = TO_FTRACE;
int j, output1, output2 = TO_FTRACE;
int list_num = ARRAY_SIZE(held_lock_white_list);
/* Don't re-read hlock->class_idx */
@@ -5475,43 +5532,60 @@ static void lockdep_check_held_locks(struct task_struct *curr, bool en)
for (j = 0; j < list_num; j++) {
if (!strcmp(name, held_lock_white_list[j]))
skip_this = 1;
/* this is a special case */
if (!strncmp(name, "s_active", 8))
skip_this = 1;
}
/* ignore special cases */
if (!strncmp(name, "s_active", 8))
skip_this = 1;
if (!strncmp(name, "kn->count", 9))
skip_this = 1;
/* locks might be unlocked in runtime */
if (skip_this || hlock->timestamp == 0 ||
timestamp_bak != hlock->timestamp)
hlock->timestamp != timestamp_bak)
continue;
output1 = aee ? TO_SRAM : TO_BOTH;
snprintf(buf_lock, sizeof(buf_lock),
"(%s) held by %s/%d/[%ld] on CPU#%d/IRQ[%s] from [%lld.%06lu]",
name, curr->comm, curr->pid,
"[%p] (%s) held by %s/%d/[%ld] on CPU#%d/IRQ[%s] from [%lld.%06lu]%s",
hlock->instance, name,
curr->comm, curr->pid,
curr->state, task_cpu(curr),
hlock->hardirqs_off ? "off" : "on",
sec_high(hlock->timestamp),
sec_low(hlock->timestamp));
lock_mon_msg(buf_lock, TO_BOTH);
sec_low(hlock->timestamp),
(output1 == TO_SRAM) ? "\n" : "");
lock_mon_msg(buf_lock, output1);
if (t_diff > lock_mon_2nd_th_ms && en)
output = TO_BOTH;
output2 = TO_BOTH;
output2 = aee ? TO_SRAM : output2;
held_lock_show_trace(&hlock->trace, output);
held_lock_show_trace(&hlock->trace, output2);
}
}
}
static void check_held_locks(void)
static DEFINE_SPINLOCK(lockdep_mon_lock);
void check_held_locks(int force)
{
struct task_struct *g, *p;
int count = 10;
int unlock = 1;
unsigned long long time_sec = sec_high(sched_clock());
bool en = !(do_div(time_sec, lock_mon_show_more_s));
static int force_bt; /* show backtrace */
if (unlikely(!debug_locks))
return;
force_bt = force ? 1 : force_bt;
en = force_bt ? 1 : en;
if (!spin_trylock(&lockdep_mon_lock))
return;
/*
* Here we try to get the tasklist_lock as hard as possible,
* if not successful after 2 seconds we ignore it (but keep
@@ -5530,7 +5604,7 @@ retry:
do_each_thread(g, p) {
if (p->lockdep_depth)
lockdep_check_held_locks(p, en);
lockdep_check_held_locks(p, en, 0);
if (!unlock)
if (read_trylock(&tasklist_lock))
unlock = 1;
@@ -5538,7 +5612,11 @@ retry:
if (unlock)
read_unlock(&tasklist_lock);
spin_unlock(&lockdep_mon_lock);
force_bt = 0;
}
EXPORT_SYMBOL_GPL(check_held_locks);
static void check_debug_locks_stats(void)
{
@@ -5548,40 +5626,43 @@ static void check_debug_locks_stats(void)
if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
snprintf(buf, sizeof(buf),
" lock-classes [%lu]", nr_lock_classes);
"lock-classes [%lu]", nr_lock_classes);
strlcat(str, buf, sizeof(str));
}
if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
snprintf(buf, sizeof(buf),
" direct dependencies [%lu]", nr_list_entries);
"direct dependencies [%lu]", nr_list_entries);
strlcat(str, buf, sizeof(str));
}
if (nr_lock_chains >= MAX_LOCKDEP_CHAINS) {
snprintf(buf, sizeof(buf),
" dependency chains [%lu]", nr_lock_chains);
"dependency chains [%lu]", nr_lock_chains);
strlcat(str, buf, sizeof(str));
}
if (nr_chain_hlocks >= MAX_LOCKDEP_CHAIN_HLOCKS) {
snprintf(buf, sizeof(buf),
" dependency chain hlocks [%d]", nr_chain_hlocks);
"dependency chain hlocks [%d]", nr_chain_hlocks);
strlcat(str, buf, sizeof(str));
}
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES) {
snprintf(buf, sizeof(buf),
" stack-trace entries [%lu]", nr_stack_trace_entries);
"stack-trace entries [%lu]", nr_stack_trace_entries);
strlcat(str, buf, sizeof(str));
}
/* check debug_locks per 10 seconds */
time_sec = sec_high(sched_clock());
if (!debug_locks && !do_div(time_sec, 10))
pr_info("debug_locks is off:%s\n", str);
if (!debug_locks && !do_div(time_sec, 10)) {
pr_info("debug_locks is off [%lld.%06lu] %s\n",
sec_high(debug_locks_off_ts),
sec_low(debug_locks_off_ts), str);
}
}
static void lock_monitor_work(struct work_struct *work)
{
check_debug_locks_stats();
check_held_locks();
cancel_delayed_work(&lock_mon_work);
if (lock_mon_enable)
check_held_locks(0);
queue_delayed_work(system_unbound_wq, &lock_mon_work,
msecs_to_jiffies(lock_mon_period_ms));
}
@@ -5627,6 +5708,7 @@ static const struct file_operations lock_mon_##name##_fops = { \
.write = lock_mon_##name##_write, \
}
DECLARE_LOCK_MONITOR_MATCH(enable, lock_mon_enable);
DECLARE_LOCK_MONITOR_MATCH(period, lock_mon_period_ms);
DECLARE_LOCK_MONITOR_MATCH(1st_th, lock_mon_1st_th_ms);
DECLARE_LOCK_MONITOR_MATCH(2nd_th, lock_mon_2nd_th_ms);
@@ -5673,6 +5755,9 @@ void lock_monitor_init(void)
if (!root)
return;
pe = proc_create("door", 0220, root, &lock_mon_door_fops);
if (!pe)
return;
pe = proc_create("enable", 0664, root, &lock_mon_enable_fops);
if (!pe)
return;
pe = proc_create("period_ms", 0664, root, &lock_mon_period_fops);
@@ -5695,4 +5780,238 @@ void lock_monitor_init(void)
queue_delayed_work(system_unbound_wq, &lock_mon_work,
msecs_to_jiffies(lock_mon_period_ms));
}
/*
 * Dump every task's held locks to the AEE SRAM console, for post-mortem
 * analysis.  Unlike check_held_locks(), output goes only to SRAM and
 * every held lock is reported regardless of hold time (the aee=1
 * argument to lockdep_check_held_locks()).
 */
void mt_aee_dump_held_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;		/* retry budget: 10 x 200ms = 2 seconds */
	int unlock = 1;		/* set when we own tasklist_lock */
	char buf[64];

	/* leading blank line separates this dump in the SRAM log */
	snprintf(buf, sizeof(buf), "\n");
	lock_mon_msg(buf, TO_SRAM);

	if (unlikely(!debug_locks)) {
		snprintf(buf, sizeof(buf), "debug_locks is off\n");
		lock_mon_msg(buf, TO_SRAM);
		return;
	}

	/* stop further lockdep reporting while we walk the task list */
	debug_locks = 0;

	/*
	 * Here we try to get the tasklist_lock as hard as possible,
	 * if not successful after 2 seconds we ignore it (but keep
	 * trying). This is to enable a debug printout even if a
	 * tasklist_lock-holding task deadlocks or crashes.
	 */
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count) {
			count--;
			mdelay(200);
			goto retry;
		}
		unlock = 0;
	}
	do_each_thread(g, p) {
		if (p->lockdep_depth)
			lockdep_check_held_locks(p, 0, 1);
		/* keep trying to take tasklist_lock so we can release it */
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
	} while_each_thread(g, p);
	if (unlock)
		read_unlock(&tasklist_lock);
}
#endif
/*
* Duplicate debug functions and print log to trace.
* This prevents deadlock from some special locks.
*/
/*
 * Format "(<name>){<usage-bits>}" for @class into @lk_name
 * (MAX_LOCK_NAME bytes), mirroring lockdep's print_lock_name() output.
 */
static void get_lock_name_usage(struct lock_class *class, char *lk_name)
{
	char name_buf[MAX_LOCK_NAME];
	char usage_buf[LOCK_USAGE_CHARS];

	get_lock_name(class, name_buf);
	get_usage_chars(class, usage_buf);

	snprintf(lk_name, MAX_LOCK_NAME, "(%s){%s}", name_buf, usage_buf);
}
/*
 * Ftrace-only counterpart of print_lock(): describe one held lock
 * (name, usage bits, acquire IP, acquire timestamp) via lock_dbg().
 */
static void trace_lock(struct held_lock *hlock)
{
	char name[MAX_LOCK_NAME];
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 */
	unsigned int class_idx = hlock->class_idx;
	/* Don't re-read hlock->class_idx,
	 * can't use READ_ONCE() on bitfields:
	 */
	barrier();
	/* snapshot out of range => the lock was released under us */
	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
		lock_dbg("<RELEASED>");
		return;
	}
	get_lock_name_usage(lock_classes + class_idx - 1, name);
	lock_dbg("%s, at: [<%p>] %pS from [%lld.%06lu]",
		name, (void *)hlock->acquire_ip, (void *)hlock->acquire_ip,
		sec_high(hlock->timestamp), sec_low(hlock->timestamp));
}
/*
 * Dump @curr's held locks (and their acquisition backtraces) to ftrace
 * via lock_dbg().  Ftrace-only variant of lockdep_print_held_locks(),
 * usable when printk itself may deadlock.
 */
static void lockdep_trace_held_locks(struct task_struct *curr)
{
	int i, j, depth = curr->lockdep_depth;
	struct held_lock *hlock;

	if (!depth) {
		lock_dbg("no locks held by %s/%d.",
			 curr->comm, task_pid_nr(curr));
		return;
	}
	lock_dbg("%d lock%s held by %s/%d/[%ld] on CPU#%d:",
		 depth, depth > 1 ? "s" : "", curr->comm,
		 curr->pid, curr->state, task_cpu(curr));
	hlock = curr->held_locks;
	for (i = 0; i < depth; i++) {
		struct stack_trace *trace = &(hlock + i)->trace;

		lock_dbg(" #%d: ", i);
		trace_lock(hlock + i);
		/* show the acquisition backtrace, if one was saved */
		if (!trace || !trace->entries)
			continue;
		/*
		 * lock_dbg() formats into its own bounded buffer, so
		 * no intermediate snprintf() buffer is needed here
		 * (keeps this consistent with dump_stack_to_trace()).
		 */
		for (j = 0; j < trace->nr_entries; j++)
			lock_dbg("%*c%pS", 6, ' ',
				 (void *)trace->entries[j]);
	}
}
/*
 * Emit every frame of @trace to ftrace via lock_dbg(), one line per
 * entry, indented to match lockdep's backtrace formatting.
 */
static void dump_stack_to_trace(struct stack_trace *trace)
{
	int idx;

	if (!trace)
		return;
	if (!trace->entries)
		return;

	for (idx = 0; idx < trace->nr_entries; idx++)
		lock_dbg("%*c%pS", 7, ' ', (void *)trace->entries[idx]);
}
static void dump_curr_stack(void)
{
struct stack_trace trace;
unsigned long entries[32];
trace.nr_entries = 0;
trace.max_entries = ARRAY_SIZE(entries);
trace.entries = entries;
trace.skip = 0;
save_stack_trace_tsk(current, &trace);
dump_stack_to_trace(&trace);
}
/*
 * Print one link of a circular-dependency chain to ftrace: the lock's
 * name/usage at position @depth, followed by its saved backtrace.
 * Always returns 0, matching print_circular_bug_entry().
 */
static noinline int
trace_circular_bug_entry(struct lock_list *target, int depth)
{
	char lk_name[MAX_LOCK_NAME];

	get_lock_name_usage(target->class, lk_name);
	lock_dbg("-> #%u: %s", depth, lk_name);
	dump_stack_to_trace(&target->trace);

	return 0;
}
/*
 * Ftrace-only counterpart of print_circular_bug_header(): emit the
 * banner, kernel version/taint line, the lock being acquired
 * (@check_src), the lock already held (@check_tgt), and the last link
 * (@entry at @depth) of the dependency chain, all via lock_dbg().
 * Raises an AEE warning at the end.  Always returns 0.
 */
static noinline int
trace_circular_bug_header(struct lock_list *entry, unsigned int depth,
struct held_lock *check_src,
struct held_lock *check_tgt)
{
	char name[MAX_LOCK_NAME];
	struct task_struct *curr = current;
	if (debug_locks_silent)
		return 0;
	lock_dbg("======================================================");
	lock_dbg("WARNING: possible circular locking dependency detected");
	/* kernel release, first word of version string, taint flags */
	lock_dbg("%s %.*s %s", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
	lock_dbg("------------------------------------------------------");
	lock_dbg("%s/%d is trying to acquire lock:",
		curr->comm, task_pid_nr(curr));
	trace_lock(check_src);
	if (cross_lock(check_tgt->instance))
		lock_dbg("but now in release context of a crosslock acquired at the following:");
	else
		lock_dbg("but task is already holding lock:");
	trace_lock(check_tgt);
	lock_dbg("which lock already depends on the new lock.");
	lock_dbg("the existing dependency chain (in reverse order) is:");
	get_lock_name_usage(entry->class, name);
	lock_dbg("-> #%u: %s", depth, name);
	dump_stack_to_trace(&entry->trace);
	lockdep_aee();
	return 0;
}
/*
 * Ftrace-only counterpart of print_circular_bug().  Used by
 * print_circular_bug() when is_log_lock_held() says printk would
 * deadlock: the whole circular-dependency report (header, chain in
 * reverse order, held locks, current backtrace) goes to the trace
 * buffer instead.  Always returns 0.
 */
static noinline int trace_circular_bug(struct lock_list *this,
	struct lock_list *target,
	struct held_lock *check_src,
	struct held_lock *check_tgt,
	struct stack_trace *trace)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	/* NOTE(review): first_parent is assigned but never used here
	 * (print_circular_bug() uses it for the lock-scenario diagram).
	 */
	struct lock_list *first_parent;
	int depth;
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;
	/* crosslocks carry their own saved trace; otherwise save one now */
	if (cross_lock(check_tgt->instance))
		this->trace = *trace;
	else if (!save_trace(&this->trace))
		return 0;
	depth = get_lock_depth(target);
	trace_circular_bug_header(target, depth, check_src, check_tgt);
	/* walk the chain back towards the root, decrementing the depth */
	parent = get_lock_parent(target);
	first_parent = parent;
	while (parent) {
		trace_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}
	lock_dbg("other info that might help us debug this:");
	lockdep_trace_held_locks(curr);
	lock_dbg("current backtrace:");
	dump_curr_stack();
	return 0;
}

View File

@@ -100,6 +100,11 @@ static int l_show(struct seq_file *m, void *v)
}
seq_printf(m, "%p", class->key);
#ifdef MTK_ENHANCE_LOCKDEP_PROC
/* 0x10: print key information */
if (lockdep_mode & 0x10)
seq_printf(m, " <%ps>", class->key);
#endif
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", class->ops);
#endif
@@ -185,6 +190,7 @@ static int lockdep_open(struct inode *inode, struct file *file)
* 0x2: print entry trace of dependency locks in locks_after
* 0x4: print locks with all distance in locks_after
* 0x8: print locks with all distance in locks_before
* 0x10: print key information
*/
static ssize_t lockdep_write(struct file *filp,
const char *ubuf, size_t cnt, loff_t *data)
@@ -204,7 +210,7 @@ static ssize_t lockdep_write(struct file *filp,
if (ret)
return ret;
if (lockdep_mode > 0x15)
if (lockdep_mode > 0x1f)
lockdep_mode = 0;
return cnt;

View File

@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
static spinlock_t lockA;
static spinlock_t lockB;
@@ -26,33 +27,44 @@ static struct mutex mutexA;
static struct rw_semaphore rw_semA;
static struct timer_list lockdep_timer;
void lockdep_test_recursive_lock(void)
{
/* recursive deadlock */
mutex_lock(&mutexA);
mutex_lock(&mutexA);
mutex_unlock(&mutexA);
mutex_unlock(&mutexA);
}
struct lockdep_test_rcu {
int val;
};
static struct lockdep_test_rcu __rcu *lockdep_test_rcu_data;
/*
 * Selftest: AA deadlock — take the same spinlock twice on one task.
 * Lockdep should report "possible recursive locking detected".
 * NOTE: this genuinely deadlocks on the second spin_lock().
 */
void lockdep_test_recursive_lock(void)
{
	/* recursive deadlock */
	spin_lock(&lockA);
	spin_lock(&lockA);
	spin_unlock(&lockA);
	spin_unlock(&lockA);
}
void lockdep_test_suspicious_rcu(void)
{
struct lockdep_test_rcu *rcu_p;
/* RCU Updater */
{
struct lockdep_test_rcu *rcu_updater;
rcu_p = kmalloc(sizeof(struct lockdep_test_rcu), GFP_KERNEL);
if (rcu_p == NULL)
return;
rcu_assign_pointer(lockdep_test_rcu_data, rcu_p);
synchronize_rcu();
rcu_updater =
kmalloc(sizeof(struct lockdep_test_rcu), GFP_KERNEL);
if (rcu_updater == NULL)
return;
/* rcu_read_lock() should be here */
rcu_p = rcu_dereference(lockdep_test_rcu_data);
/* rcu_read_unlock() should be here */
rcu_updater->val = 123;
rcu_assign_pointer(lockdep_test_rcu_data, rcu_updater);
synchronize_rcu();
}
/* RCU Reader */
{
struct lockdep_test_rcu *rcu_reader;
/* rcu_read_lock() should be here */
rcu_reader = rcu_dereference(lockdep_test_rcu_data);
pr_info("data value is %d\n", rcu_reader->val);
/* rcu_read_unlock() should be here */
}
}
void lockdep_test_circular_deadlock(void)
@@ -73,6 +85,8 @@ void lockdep_test_circular_deadlock(void)
static void lockdep_test_timer(unsigned long arg)
{
spin_lock(&lockA);
if (arg == 1)
mdelay(5000);
spin_unlock(&lockA);
}
@@ -84,14 +98,14 @@ void lockdep_test_inconsistent_lock_a(void)
/* {IN-SOFTIRQ-W} */
setup_timer(&lockdep_timer, lockdep_test_timer, 0);
mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(20));
mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
}
void lockdep_test_inconsistent_lock_b(void)
{
/* {IN-SOFTIRQ-W} */
setup_timer(&lockdep_timer, lockdep_test_timer, 0);
mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(20));
mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
mdelay(100);
/* {SOFTIRQ-ON-W} */
@@ -103,14 +117,90 @@ void lockdep_test_irq_lock_inversion(void)
{
unsigned long flags;
/* the order is the problem */
/* lockB is used in SOFTIRQ-unsafe condition.
* The state of lockB is {SOFTIRQ-ON-W}.
* The state of lockB in SOFTIRQ field is marked as {+}.
* A new lock dependency is generated.
* 1. lockB
*/
spin_lock(&lockB);
spin_unlock(&lockB);
/* lockA and lockB are used in absolute safe condition.
* Because IRQ is disabled and they are not used in interrupt handler.
* The state of lockA and lockB are unconcerned.
* The state of lockA and lockB in SOFTIRQ field are marked as {.}.
* Two new lock dependencies are generated.
* 1. lockA
* 2. lockA -> lockB
*/
spin_lock_irqsave(&lockA, flags);
spin_lock(&lockB);
spin_unlock(&lockB);
spin_unlock_irqrestore(&lockA, flags);
/* lockA is used in lockdep_test_timer.
* It's a timer callback function and the condition is SOFTIRQ-safe.
* The state of lockA will change from unconcerned to {IN-SOFTIRQ-W}.
* The state of lockA in SOFTIRQ field will change from {.} to {-}.
* No lock dependency is generated.
*/
setup_timer(&lockdep_timer, lockdep_test_timer, 0);
mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
}
void lockdep_test_irq_lock_inversion_sp(void)
{
unsigned long flags;
/* This is a special case.
* The lock and unlock order is the problem.
* The wrong order make lockB to be in IRQ-unsafe condition.
*
* (X) <IRQ-safe>
* -> lock(A)
* -> lock(B)
* -> unlock(A)
* <IRQ-unsafe>
* -> unlock(B)
*
* (O) <IRQ-safe>
* -> lock(A)
* -> lock(B)
* -> unlock(B)
* -> unlock(A)
* <IRQ-unsafe>
*/
spin_lock_irqsave(&lockA, flags);
spin_lock(&lockB);
spin_unlock_irqrestore(&lockA, flags);
spin_unlock(&lockB);
setup_timer(&lockdep_timer, lockdep_test_timer, 0);
mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(20));
mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
}
/*
 * Selftest: create a SOFTIRQ-safe -> SOFTIRQ-unsafe lock order.
 * lockB is first taken with softirqs enabled (unsafe usage); the timer
 * callback then takes lockA in softirq context (safe usage); finally
 * the lockA -> lockB dependency is recorded, which lockdep should
 * flag as a safe -> unsafe lock order.
 */
void lockdep_test_safe_to_unsafe(void)
{
	unsigned long flags;
	/* SOFTIRQ-unsafe */
	spin_lock(&lockB);
	spin_unlock(&lockB);
	/* SOFTIRQ-safe */
	setup_timer(&lockdep_timer, lockdep_test_timer, 0);
	mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
	/* wait for lockdep_test_timer to finish */
	mdelay(200);
	/* safe and unconcerned */
	spin_lock_irqsave(&lockA, flags);
	spin_lock(&lockB);
	spin_unlock(&lockB);
	spin_unlock_irqrestore(&lockA, flags);
}
void lockdep_test_uninitialized(void)
@@ -152,6 +242,22 @@ void lockdep_test_held_lock_freed(void)
/* should do spin_unlock before free memory */
}
/*
 * Helper thread for the spin_time selftest: hold lockA for 8 seconds
 * to simulate a long-held spinlock.  Always returns 0.
 */
static int lockdep_test_thread(void *data)
{
	spin_lock(&lockA);
	mdelay(8000);
	spin_unlock(&lockA);
	return 0;
}
/*
 * Selftest: spin on a lock another thread holds for a long time.
 * The helper thread grabs lockA first (100ms head start); this caller
 * then blocks in spin_lock() for roughly 8 seconds, exercising
 * long-spin detection.
 */
void lockdep_test_spin_time(void)
{
	kthread_run(lockdep_test_thread, NULL, "lockdep_test_spin_time");
	mdelay(100);
	spin_lock(&lockA);
	spin_unlock(&lockA);
}
void lockdep_test_lock_monitor(void)
{
mutex_lock(&mutexA);
@@ -167,20 +273,12 @@ void lockdep_test_lock_monitor(void)
mutex_unlock(&mutexA);
}
static int lockdep_test_thread(void *data)
void lockdep_test_freeze_with_lock(void)
{
spin_lock(&lockA);
mdelay(5000);
spin_unlock(&lockA);
return 0;
}
void lockdep_test_spin_time(void)
{
kthread_run(lockdep_test_thread, NULL, "lockdep_test");
mdelay(500);
spin_lock(&lockA);
spin_unlock(&lockA);
mutex_lock(&mutexA);
/* should not freeze a task with locks held */
try_to_freeze();
mutex_unlock(&mutexA);
}
struct lockdep_test_func {
@@ -191,20 +289,22 @@ struct lockdep_test_func {
struct lockdep_test_func lockdep_test_list[] = {
/* KernelAPI Dump */
{"circular_deadlock", lockdep_test_circular_deadlock},
{"recursive_lock", lockdep_test_recursive_lock},
{"suspicious_rcu", lockdep_test_suspicious_rcu},
{"inconsistent_lock_a", lockdep_test_inconsistent_lock_a},
{"inconsistent_lock_b", lockdep_test_inconsistent_lock_b},
{"irq_lock_inversion", lockdep_test_irq_lock_inversion},
{"irq_lock_inversion_sp", lockdep_test_irq_lock_inversion_sp},
{"bad_unlock_balance", lockdep_test_bad_unlock_balance},
{"safe_to_unsafe", lockdep_test_safe_to_unsafe},
{"spin_time", lockdep_test_spin_time},
{"lock_monitor", lockdep_test_lock_monitor},
{"freeze_with_lock", lockdep_test_freeze_with_lock},
/* KE */
{"uninitialized", lockdep_test_uninitialized},
{"bad_magic", lockdep_test_bad_magic},
{"wrong_owner_cpu", lockdep_test_wrong_owner_cpu},
{"held_lock_freed", lockdep_test_held_lock_freed},
/* HWT */
{"recursive_lock", lockdep_test_recursive_lock},
{"held_lock_freed", lockdep_test_held_lock_freed}
};
static ssize_t lockdep_test_write(struct file *file,

View File

@@ -12,6 +12,7 @@
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include "sched.h"
#ifdef CONFIG_MTK_AEE_FEATURE
#include <mt-plat/aee.h>
@@ -58,12 +59,32 @@ static unsigned long sec_low(unsigned long long nsec)
}
#endif
/*
 * Return true when a lock that the AEE warning path itself depends on
 * is (or may be) held, in which case calling aee_kernel_warning_api()
 * from the spinlock debug code could deadlock and must be skipped.
 *
 * The span previously carried the pre-rename signature
 * ("is_critical_lock") and a stale extern for the old
 * is_logbuf_lock() directly above this definition (invalid C); only
 * the current definition is kept.
 */
bool is_critical_lock_held(raw_spinlock_t *lock)
{
	int cpu;
	struct rq *rq;

	cpu = raw_smp_processor_id();
	rq = cpu_rq(cpu);

	/* The lock is needed by aee_kernel_warning_api */
	if (raw_spin_is_locked(&rq->lock))
		return true;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * dep_map (and its lockdep class name) only exists with
	 * CONFIG_DEBUG_LOCK_ALLOC, hence the ifdef.
	 */
	/* The lock is needed by kmalloc and aee_kernel_warning_api */
	if (!strcmp(lock->dep_map.name, "&(&n->list_lock)->rlock"))
		return true;
	if (!strcmp(lock->dep_map.name, "depot_lock"))
		return true;
#endif
	return false;
}
/*
 * Return true if @lock is the printk logbuf_lock (identified by its
 * lockdep class name).  Callers use this to skip spin debugging on the
 * printk buffer lock, since reporting would itself need that lock.
 *
 * The span previously kept the old "&(&n->list_lock)->rlock" strcmp
 * line above the logbuf_lock check; without braces that made the
 * logbuf_lock test the body of a contradictory if (never reachable).
 * Only the intended check is kept.
 */
bool is_logbuf_lock_held(raw_spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* The lock is needed by kmalloc and aee_kernel_warning_api */
	if (!strcmp(lock->dep_map.name, "logbuf_lock"))
		return true;
#endif
	return false;
@@ -131,7 +152,7 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
return;
spin_dump(lock, msg);
snprintf(aee_str, 50, "%s: [%s]\n", current->comm, msg);
snprintf(aee_str, sizeof(aee_str), "%s: [%s]\n", current->comm, msg);
if (!strcmp(msg, "bad magic") || !strcmp(msg, "already unlocked")
|| !strcmp(msg, "wrong owner") || !strcmp(msg, "wrong CPU")
@@ -146,9 +167,11 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
}
#ifdef CONFIG_MTK_AEE_FEATURE
aee_kernel_warning_api(__FILE__, __LINE__,
DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
aee_str, "spinlock debugger\n");
if (!is_critical_lock_held(lock)) {
aee_kernel_warning_api(__FILE__, __LINE__,
DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
aee_str, "spinlock debugger\n");
}
#endif
}
@@ -182,13 +205,20 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
#ifdef MTK_LOCK_DEBUG
/*
 * IPI callback: print the call trace of the CPU currently owning the
 * contended spinlock.  Invoked on the owner CPU via
 * smp_call_function_single_async().
 *
 * The span previously carried both the old "smp_processor_id());" and
 * the new "raw_smp_processor_id());" continuation lines (a duplicated
 * argument and a syntax error); only the raw_ variant — safe without
 * preemption guarantees — is kept.
 */
static void show_cpu_backtrace(void *ignored)
{
	pr_info("========== The call trace of lock owner on CPU%d ==========\n",
		raw_smp_processor_id());
	dump_stack();
}

/*
 * Per-CPU call_single_data so that spinners on different CPUs do not
 * race on a single shared csd when asking an owner CPU to dump its
 * backtrace.
 */
static DEFINE_PER_CPU(call_single_data_t, spinlock_debug_csd) = {
	.func = show_cpu_backtrace,
	.info = NULL,
	.flags = 0
};
#endif
/*Select appropriate loop counts to 1~2sec*/
#if HZ == 100
@@ -200,26 +230,20 @@ static void show_cpu_backtrace(void *ignored)
#endif
#define WARNING_TIME 1000000000 /* warning time 1 seconds */
static call_single_data_t spinlock_debug_csd = {
.func = show_cpu_backtrace,
.info = NULL,
.flags = 0
};
static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * LOOP_HZ;
u64 one_second = loops_per_jiffy * LOOP_HZ;
u64 loops = one_second;
u32 cpu = raw_smp_processor_id();
int print_once = 1, cnt = 0;
char lock_name[64];
unsigned long long t1, t2, t3;
struct task_struct *owner = NULL;
cycles_t start = get_cycles();
if (is_logbuf_lock(lock)) { /* ignore to debug logbuf_lock */
for (i = 0; i < loops; i++) {
if (arch_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
/* skip debugging */
if (is_logbuf_lock_held(lock)) {
arch_spin_lock(&lock->raw_lock);
return;
}
@@ -227,11 +251,11 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
t2 = t1;
for (;;) {
for (i = 0; i < loops; i++) {
while ((get_cycles() - start) < loops) {
if (arch_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
loops += one_second;
t3 = sched_clock();
if (t3 < t2)
@@ -249,8 +273,9 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
#else
snprintf(lock_name, 64, "%ps", lock);
#endif
pr_info("(%s) spin time: %llu ms(from %lld.%06lu), raw_lock: 0x%08x, magic: %08x, held by %s/%d on CPU#%d(from %lld.%06lu)\n",
lock_name, msec_high(t2 - t1), sec_high(t1), sec_low(t1),
pr_info("(%s)(%p) spin time: %llu ms(from %lld.%06lu), raw_lock: 0x%08x, magic: %08x, held by %s/%d on CPU#%d(from %lld.%06lu)\n",
lock_name, lock,
msec_high(t2 - t1), sec_high(t1), sec_low(t1),
*((unsigned int *)&lock->raw_lock), lock->magic,
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1, lock->owner_cpu,
@@ -271,14 +296,15 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
pr_info("========== The call trace of spinning task ==========\n");
dump_stack();
if (owner)
if (owner && (cpu != lock->owner_cpu))
smp_call_function_single_async(
lock->owner_cpu, &spinlock_debug_csd);
lock->owner_cpu,
this_cpu_ptr(&spinlock_debug_csd));
#ifdef CONFIG_MTK_AEE_FEATURE
if (!is_critical_lock(lock)) {
if (!is_critical_lock_held(lock)) {
char aee_str[50];
snprintf(aee_str, 50,
snprintf(aee_str, sizeof(aee_str),
"Spinlock lockup: (%s) in %s\n",
lock_name, current->comm);
aee_kernel_warning_api(__FILE__, __LINE__,
@@ -289,7 +315,6 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
}
}
}
#endif
/*
* We are now relying on the NMI watchdog to detect lockup instead of doing

View File

@@ -1458,6 +1458,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
check_held_locks(1);
pr_err("INFO: %s detected stalls on CPUs/tasks:",
rsp->name);
print_cpu_stall_info_begin();
@@ -1526,6 +1527,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
check_held_locks(1);
pr_err("INFO: %s self-detected stall on CPU", rsp->name);
print_cpu_stall_info_begin();
print_cpu_stall_info(rsp, smp_processor_id());

View File

@@ -253,6 +253,7 @@ static void ftrace_events_enable(int enable)
trace_set_clr_event(NULL, "sched_mon_msg", 1);
#endif
#ifdef CONFIG_LOCKDEP
trace_set_clr_event(NULL, "lock_dbg", 1);
trace_set_clr_event(NULL, "lock_monitor_msg", 1);
#endif
trace_set_clr_event("mtk_events", NULL, 1);

View File

@@ -14,6 +14,11 @@
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#ifdef CONFIG_LOCKDEP
#include <linux/sched/clock.h>
unsigned long long debug_locks_off_ts;
#endif
/*
* We want to turn all lock-debugging facilities on/off at once,
* via a global flag. The reason is that once a single bug has been
@@ -38,6 +43,9 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
int debug_locks_off(void)
{
if (debug_locks && __debug_locks_off()) {
#ifdef CONFIG_LOCKDEP
debug_locks_off_ts = sched_clock();
#endif
if (!debug_locks_silent) {
console_verbose();
return 1;