[ALPS04870405] lockdep: implement lock debugging

1. introduce lock monitor
2. introduce spinlock debugging
3. introduce new lock dependency capture
4. enhance /proc/lockdep debug information

MTK-Commit-Id: 8b5e506a5cd60ea3264f1adb480e0463088f91af

Change-Id: I63fde6ba18f5c73d0ecaafa1bc9bb8f2a2e2fad1
CR-Id: ALPS04870405
Feature: [Module]Lockdep
Signed-off-by: Kobe Wu <kobe-cp.wu@mediatek.com>
This commit is contained in:
Kobe Wu
2021-01-28 13:27:29 +08:00
committed by Ladios Chang
parent 88bf0be69b
commit 0e485a946a
12 changed files with 2020 additions and 20 deletions

View File

@@ -261,6 +261,13 @@ struct held_lock {
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
/* MTK_LOCK_DEBUG_HELD_LOCK */
#define HELD_LOCK_STACK_TRACE_DEPTH 24
struct stack_trace trace;
unsigned long entries[HELD_LOCK_STACK_TRACE_DEPTH];
/* MTK_LOCK_MONITOR */
unsigned long long timestamp;
};
/*
@@ -617,6 +624,7 @@ do { \
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
extern unsigned long long debug_locks_off_ts;
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -624,4 +632,19 @@ lockdep_rcu_suspicious(const char *file, const int line, const char *s)
}
#endif
#ifdef CONFIG_PROVE_LOCKING
void check_held_locks(int force);
void mt_aee_dump_held_locks(void);
#else
static inline void
check_held_locks(int force)
{
}
static inline void
mt_aee_dump_held_locks(void)
{
}
#endif
#endif /* __LINUX_LOCKDEP_H */

View File

@@ -22,6 +22,9 @@ typedef struct raw_spinlock {
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
/* timestamp of lock/unlock*/
unsigned long long lock_t;
unsigned long long unlock_t;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;

View File

@@ -79,6 +79,41 @@ DEFINE_EVENT(lock, lock_acquired,
);
#endif
/*
 * Generic string-payload trace events used by the MTK lock debugging code:
 * trace_lock_dbg() carries lockdep debug messages, trace_lock_monitor_msg()
 * carries lock-monitor reports.  Both record a preformatted buffer and
 * print it verbatim.
 */
TRACE_EVENT(lock_dbg,
	TP_PROTO(const char *buf),
	TP_ARGS(buf),
	TP_STRUCT__entry(
		__string(mbuf, buf)
	),
	TP_fast_assign(
		__assign_str(mbuf, buf);
	),
	TP_printk("%s", __get_str(mbuf))
);

TRACE_EVENT(lock_monitor_msg,
	TP_PROTO(const char *buf),
	TP_ARGS(buf),
	TP_STRUCT__entry(
		__string(mbuf, buf)
	),
	TP_fast_assign(
		__assign_str(mbuf, buf);
	),
	TP_printk("%s", __get_str(mbuf))
);
#endif
#endif /* _TRACE_LOCK_H */

View File

@@ -2,7 +2,7 @@
# Any varying coverage in these files is non-deterministic
# and is generally not a function of system call inputs.
KCOV_INSTRUMENT := n
UBSAN_SANITIZE_lockdep.o := n
ccflags-y += -I$(srctree)/
ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/
obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
@@ -14,10 +14,21 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif
ifeq ($(CONFIG_PROVE_LOCKING),y)
ccflags-y += -DMTK_LOCK_DEBUG_NEW_DEPENDENCY
ccflags-y += -DMTK_LOCK_MONITOR
ccflags-y += -DMTK_ENHANCE_LOCKDEP_PROC
ifeq ($(CONFIG_MACH_MT6771),y)
ccflags-y += -DMTK_DEBUG_SPINLOCK_V2
else
ccflags-y += -DMTK_DEBUG_SPINLOCK_V1
endif
endif
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o lockdep_test.o
endif
obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o

File diff suppressed because it is too large Load Diff

View File

@@ -198,3 +198,13 @@ static bool is_critical_lock_held(void)
}
#endif
#endif
#ifdef CONFIG_MTK_AEE_IPANIC
#include <mt-plat/mboot_params.h>
#endif
void lockdep_test_init(void);
#ifdef MTK_LOCK_MONITOR
void lock_monitor_init(void);
#endif

View File

@@ -24,6 +24,27 @@
#include "lockdep_internals.h"
#ifdef CONFIG_MTK_BOOT
#include <mt-plat/mtk_boot_common.h>
#endif
#ifdef MTK_ENHANCE_LOCKDEP_PROC
static unsigned int lockdep_mode;
#define __USAGE(__STATE) \
[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
static const char * const usage_str[] = {
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
[LOCK_USED] = "INITIAL USE",
};
#endif
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
return seq_list_next(v, &all_lock_classes, pos);
@@ -55,6 +76,20 @@ static void print_name(struct seq_file *m, struct lock_class *class)
}
}
#ifdef MTK_ENHANCE_LOCKDEP_PROC
/*
 * Print one saved stack trace to the seq_file, one symbolized ("%pS")
 * frame per line, indented by 7 spaces.  A trace whose entries array was
 * never allocated is silently skipped.
 */
static void lock_show_trace(struct seq_file *m, struct stack_trace *trace)
{
	int i;

	if (!trace->entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		seq_printf(m, "%*c%pS\n", 7, ' ',
			   (void *)trace->entries[i]);
}
#endif
static int l_show(struct seq_file *m, void *v)
{
struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
@@ -67,6 +102,11 @@ static int l_show(struct seq_file *m, void *v)
}
seq_printf(m, "%p", class->key);
#ifdef MTK_ENHANCE_LOCKDEP_PROC
/* 0x10: print key information */
if (lockdep_mode & 0x10)
seq_printf(m, " <%ps>", class->key);
#endif
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", class->ops);
#endif
@@ -82,6 +122,44 @@ static int l_show(struct seq_file *m, void *v)
print_name(m, class);
seq_puts(m, "\n");
#ifdef MTK_ENHANCE_LOCKDEP_PROC
/* 0x1: print usage traces of this lock */
if (lockdep_mode & 0x1) {
int bit;
for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
if (class->usage_mask & (1 << bit)) {
seq_printf(m, "%s:\n", usage_str[bit]);
lock_show_trace(m, class->usage_traces + bit);
}
}
seq_puts(m, "-------------------------------------------\n");
}
list_for_each_entry(entry, &class->locks_after, entry) {
/* 0x4: print locks with all distance in locks_after */
if (entry->distance == 1 || (lockdep_mode & 0x4)) {
seq_printf(m, " -> [%p] ", entry->class->key);
print_name(m, entry->class);
seq_printf(m, " [FD][%d]", entry->distance);
seq_puts(m, "\n");
/* 0x2: print entry trace of dependency locks in locks_after */
if (lockdep_mode & 0x2)
lock_show_trace(m, &entry->trace);
}
}
/* 0x8: print locks with all distance in locks_before */
if (lockdep_mode & 0x8) {
list_for_each_entry(entry, &class->locks_before, entry) {
seq_printf(m, " -> [%p] ", entry->class->key);
print_name(m, entry->class);
seq_printf(m, " [BD][%d]", entry->distance);
seq_puts(m, "\n");
}
}
seq_puts(m, "\n");
#else
list_for_each_entry(entry, &class->locks_after, entry) {
if (entry->distance == 1) {
seq_printf(m, " -> [%p] ", entry->class->key);
@@ -90,6 +168,7 @@ static int l_show(struct seq_file *m, void *v)
}
}
seq_puts(m, "\n");
#endif
return 0;
}
@@ -101,6 +180,54 @@ static const struct seq_operations lockdep_ops = {
.show = l_show,
};
#ifdef MTK_ENHANCE_LOCKDEP_PROC
/* open() hook for /proc/lockdep; reads are served by lockdep_ops seq_file. */
static int lockdep_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lockdep_ops);
}
/*
 * lockdep_mode bits accepted (in hex) via writes to /proc/lockdep:
 * 0x0: print basic dependency information
 * 0x1: print usage traces of this lock
 * 0x2: print entry trace of dependency locks in locks_after
 * 0x4: print locks with all distance in locks_after
 * 0x8: print locks with all distance in locks_before
 * 0x10: print key information
 */
static ssize_t
lockdep_write(struct file *filp, const char *ubuf,
	      size_t cnt, loff_t *data)
{
	char buf[16];
	int ret;

	/*
	 * Reject empty or oversized input with an error instead of
	 * returning cnt, which silently discarded the write and made
	 * the caller believe it succeeded.
	 */
	if (cnt >= sizeof(buf) || cnt == 0)
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* value is parsed as hex, e.g. "echo 13 > /proc/lockdep" */
	ret = kstrtouint(buf, 16, &lockdep_mode);
	if (ret)
		return ret;

	/* only bits up to 0x1f are defined; fall back to basic mode */
	if (lockdep_mode > 0x1f)
		lockdep_mode = 0;

	return cnt;
}
static const struct file_operations proc_lockdep_ops = {
.open = lockdep_open,
.write = lockdep_write,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
#ifdef CONFIG_PROVE_LOCKING
static void *lc_start(struct seq_file *m, loff_t *pos)
{
@@ -648,7 +775,15 @@ static const struct file_operations proc_lock_stat_operations = {
static int __init lockdep_proc_init(void)
{
#ifdef CONFIG_MTK_BOOT
if (get_boot_mode() == META_BOOT)
debug_locks_off();
#endif
#ifdef MTK_ENHANCE_LOCKDEP_PROC
proc_create("lockdep", 0600, NULL, &proc_lockdep_ops);
#else
proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
#endif
#ifdef CONFIG_PROVE_LOCKING
proc_create_seq("lockdep_chains", S_IRUSR, NULL, &lockdep_chains_ops);
#endif
@@ -658,6 +793,10 @@ static int __init lockdep_proc_init(void)
&proc_lock_stat_operations);
#endif
#ifdef MTK_LOCK_MONITOR
lock_monitor_init();
#endif
lockdep_test_init();
return 0;
}

View File

@@ -0,0 +1,340 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
*/
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
static spinlock_t lockA;
static spinlock_t lockB;
static spinlock_t lockC;
static spinlock_t lockD;
static struct mutex mutexA;
static struct rw_semaphore rw_semA;
static struct timer_list lockdep_timer;
/*
 * Trigger lockdep's recursive (AA) deadlock report by acquiring mutexA
 * twice in a row.  The second mutex_lock() self-deadlocks on a working
 * mutex -- this is a deliberate test trigger, never call it in production.
 */
void lockdep_test_recursive_lock(void)
{
	/* recursive deadlock */
	mutex_lock(&mutexA);
	mutex_lock(&mutexA);
	mutex_unlock(&mutexA);
	mutex_unlock(&mutexA);
}
struct lockdep_test_rcu {
int val;
};
static struct lockdep_test_rcu __rcu *lockdep_test_rcu_data;
/*
 * Trigger a "suspicious rcu_dereference_check() usage" warning:
 * the reader side intentionally calls rcu_dereference() without being
 * inside an rcu_read_lock()/rcu_read_unlock() critical section.
 * NOTE: any previously published lockdep_test_rcu_data is leaked by the
 * updater -- acceptable for a one-shot test trigger.
 */
void lockdep_test_suspicious_rcu(void)
{
	/* RCU Updater */
	{
		struct lockdep_test_rcu *rcu_updater;

		rcu_updater =
			kmalloc(sizeof(struct lockdep_test_rcu), GFP_KERNEL);
		if (!rcu_updater)
			return;
		rcu_updater->val = 123;
		RCU_INIT_POINTER(lockdep_test_rcu_data, rcu_updater);
		synchronize_rcu();
	}
	/* RCU Reader */
	{
		struct lockdep_test_rcu *rcu_reader;

		/* rcu_read_lock() should be here */
		rcu_reader = rcu_dereference(lockdep_test_rcu_data);
		pr_info("data value is %d\n", rcu_reader->val);
		/* rcu_read_unlock() should be here */
	}
}
/*
 * Trigger lockdep's circular-dependency (AB-BA) report: establish the
 * dependency lockA -> lockB, then take the locks in the opposite order.
 * Run sequentially this does not actually deadlock; lockdep warns on the
 * inverted dependency.
 */
void lockdep_test_circular_deadlock(void)
{
	/* lockA -> lockB */
	spin_lock(&lockA);
	spin_lock(&lockB);
	spin_unlock(&lockB);
	spin_unlock(&lockA);
	/* lockB -> lockA */
	spin_lock(&lockB);
	spin_lock(&lockA);
	spin_unlock(&lockA);
	spin_unlock(&lockB);
}
/*
 * Timer callback: takes lockA in (soft)irq context so lockdep marks the
 * lock's usage as {IN-SOFTIRQ-W}.  Shared by several inconsistency tests.
 */
static void lockdep_test_timer(struct timer_list *t)
{
	spin_lock(&lockA);
	spin_unlock(&lockA);
}
/*
 * Trigger an inconsistent-usage report: first use lockA with softirqs
 * enabled ({SOFTIRQ-ON-W}), then arm a timer whose callback takes the
 * same lock in softirq context ({IN-SOFTIRQ-W}).
 */
void lockdep_test_inconsistent_lock_a(void)
{
	/* {SOFTIRQ-ON-W} */
	spin_lock(&lockA);
	spin_unlock(&lockA);
	/* {IN-SOFTIRQ-W} */
	timer_setup(&lockdep_timer, lockdep_test_timer, 0);
	mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
}
/*
 * Same inconsistency as lockdep_test_inconsistent_lock_a but in the
 * opposite order: lockA is first used in softirq context (timer callback),
 * then taken with softirqs enabled.  The mdelay ensures the timer has
 * fired before the unsafe acquisition.
 */
void lockdep_test_inconsistent_lock_b(void)
{
	/* {IN-SOFTIRQ-W} */
	timer_setup(&lockdep_timer, lockdep_test_timer, 0);
	mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
	mdelay(100);
	/* {SOFTIRQ-ON-W} */
	spin_lock(&lockA);
	spin_unlock(&lockA);
}
/*
 * Trigger an IRQ lock-inversion report by combining a SOFTIRQ-unsafe use
 * of lockB, a lockA -> lockB dependency taken with IRQs off, and finally
 * a SOFTIRQ-safe use of lockA from a timer callback.
 */
void lockdep_test_irq_lock_inversion(void)
{
	unsigned long flags;

	/* lockB is used in SOFTIRQ-unsafe condition.
	 * The state of lockB is {SOFTIRQ-ON-W}.
	 * The state of lockB in SOFTIRQ field is marked as {+}.
	 * A new lock dependency is generated.
	 * 1. lockB
	 */
	spin_lock(&lockB);
	spin_unlock(&lockB);
	/* lockA and lockB are used in absolute safe condition.
	 * Because IRQ is disabled and they are not used in interrupt handler.
	 * The state of lockA and lockB are unconcerned.
	 * The state of lockA and lockB in SOFTIRQ field are marked as {.}.
	 * Two new lock dependencies are generated.
	 * 1. lockA
	 * 2. lockA -> lockB
	 */
	spin_lock_irqsave(&lockA, flags);
	spin_lock(&lockB);
	spin_unlock(&lockB);
	spin_unlock_irqrestore(&lockA, flags);
	/* lockA is used in lockdep_test_timer.
	 * It's a timer callback function and the condition is SOFTIRQ-safe.
	 * The state of lockA will change from unconcerned to {IN-SOFTIRQ-W}.
	 * The state of lockA in SOFTIRQ field will change from {.} to {-}.
	 * No lock dependency is generated.
	 */
	timer_setup(&lockdep_timer, lockdep_test_timer, 0);
	mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
}
/*
 * Special-case IRQ lock inversion caused purely by unlock ordering:
 * releasing lockA (and restoring IRQs) before releasing lockB leaves
 * lockB held in an IRQ-unsafe window.
 */
void lockdep_test_irq_lock_inversion_sp(void)
{
	unsigned long flags;

	/* This is a special case.
	 * The lock and unlock order is the problem.
	 * The wrong unlock order leaves lockB in an IRQ-unsafe condition.
	 *
	 * (X) <IRQ-safe>
	 *     -> lock(A)
	 *     -> lock(B)
	 *     -> unlock(A)
	 *     <IRQ-unsafe>
	 *     -> unlock(B)
	 *
	 * (O) <IRQ-safe>
	 *     -> lock(A)
	 *     -> lock(B)
	 *     -> unlock(B)
	 *     -> unlock(A)
	 *     <IRQ-unsafe>
	 */
	spin_lock_irqsave(&lockA, flags);
	spin_lock(&lockB);
	spin_unlock_irqrestore(&lockA, flags);
	spin_unlock(&lockB);

	timer_setup(&lockdep_timer, lockdep_test_timer, 0);
	mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
}
/*
 * Trigger a safe -> unsafe dependency report: lockB is first used
 * SOFTIRQ-unsafe, lockA becomes SOFTIRQ-safe via the timer callback, and
 * then the SOFTIRQ-safe lockA is made to depend on the unsafe lockB.
 */
void lockdep_test_safe_to_unsafe(void)
{
	unsigned long flags;

	/* SOFTIRQ-unsafe */
	spin_lock(&lockB);
	spin_unlock(&lockB);
	/* SOFTIRQ-safe */
	timer_setup(&lockdep_timer, lockdep_test_timer, 0);
	mod_timer(&lockdep_timer, jiffies + msecs_to_jiffies(10));
	/* wait for lockdep_test_timer to finish */
	mdelay(200);
	/* safe and unconcerned */
	spin_lock_irqsave(&lockA, flags);
	spin_lock(&lockB);
	spin_unlock(&lockB);
	spin_unlock_irqrestore(&lockA, flags);
}
/*
 * Trigger the spinlock-debug "uninitialized lock" report: lockD is
 * deliberately never passed to spin_lock_init() (see lockdep_test_init).
 */
void lockdep_test_uninitialized(void)
{
	/* miss spin_lock_init */
	spin_lock(&lockD);
	spin_unlock(&lockD);
}
/*
 * Trigger the CONFIG_DEBUG_SPINLOCK "bad magic" report by locking lockD,
 * whose magic field was never set by spin_lock_init().  Same trigger code
 * as lockdep_test_uninitialized; the report differs by config.
 */
void lockdep_test_bad_magic(void)
{
	/* without spin_lock_init */
	spin_lock(&lockD);
	spin_unlock(&lockD);
}
/* Trigger the "bad unlock balance" report by unlocking lockA twice. */
void lockdep_test_bad_unlock_balance(void)
{
	spin_lock(&lockA);
	spin_unlock(&lockA);
	spin_unlock(&lockA);
}
/*
 * Trigger the spinlock-debug "wrong CPU" check in debug_spin_unlock() by
 * corrupting owner_cpu while the lock is held.
 */
void lockdep_test_wrong_owner_cpu(void)
{
	spin_lock(&lockA);
	lockA.rlock.owner_cpu = -1;
	spin_unlock(&lockA);
}
/*
 * Trigger lockdep's "held lock freed" report: free a spinlock's memory
 * while it is still held.  The lock is intentionally never unlocked.
 */
void lockdep_test_held_lock_freed(void)
{
	spinlock_t *lockE; /* pointer */

	lockE = kmalloc(sizeof(*lockE), GFP_KERNEL);
	/* unchecked kmalloc would NULL-deref before the intended report */
	if (!lockE)
		return;
	spin_lock_init(lockE);
	spin_lock(lockE);
	kfree(lockE);
	/* should do spin_unlock before free memory */
}
/*
 * Helper kthread for lockdep_test_spin_time: holds lockA for 8 seconds so
 * the caller's spin_lock() spins long enough to trip the debug reports.
 */
static int lockdep_test_thread(void *data)
{
	spin_lock(&lockA);
	mdelay(8000);
	spin_unlock(&lockA);
	return 0;
}
/*
 * Trigger the spinning-time report: a helper thread grabs lockA and holds
 * it for 8s; after a short delay this context contends on it and spins.
 */
void lockdep_test_spin_time(void)
{
	kthread_run(lockdep_test_thread, NULL, "lockdep_test_spin_time");
	mdelay(100);
	spin_lock(&lockA);
	spin_unlock(&lockA);
}
/*
 * Trigger lock-monitor reports by holding several lock types (mutex,
 * rwsem, RCU read lock, spinlock) for 12 seconds in one nesting.
 */
void lockdep_test_lock_monitor(void)
{
	mutex_lock(&mutexA);
	down_read(&rw_semA);
	rcu_read_lock();
	spin_lock(&lockA);
	mdelay(12000);
	spin_unlock(&lockA);
	rcu_read_unlock();
	up_read(&rw_semA);
	mutex_unlock(&mutexA);
}
/*
 * Trigger the "freezing with locks held" warning by calling
 * try_to_freeze() while mutexA is held.
 */
void lockdep_test_freeze_with_lock(void)
{
	mutex_lock(&mutexA);
	/* should not freeze a task with locks held */
	try_to_freeze();
	mutex_unlock(&mutexA);
}
struct lockdep_test_func {
char name[32];
void (*func)(void);
};
struct lockdep_test_func lockdep_test_list[] = {
/* KernelAPI Dump */
{"circular_deadlock", lockdep_test_circular_deadlock},
{"recursive_lock", lockdep_test_recursive_lock},
{"suspicious_rcu", lockdep_test_suspicious_rcu},
{"inconsistent_lock_a", lockdep_test_inconsistent_lock_a},
{"inconsistent_lock_b", lockdep_test_inconsistent_lock_b},
{"irq_lock_inversion", lockdep_test_irq_lock_inversion},
{"irq_lock_inversion_sp", lockdep_test_irq_lock_inversion_sp},
{"bad_unlock_balance", lockdep_test_bad_unlock_balance},
{"safe_to_unsafe", lockdep_test_safe_to_unsafe},
{"spin_time", lockdep_test_spin_time},
{"lock_monitor", lockdep_test_lock_monitor},
{"freeze_with_lock", lockdep_test_freeze_with_lock},
/* KE */
{"uninitialized", lockdep_test_uninitialized},
{"bad_magic", lockdep_test_bad_magic},
{"wrong_owner_cpu", lockdep_test_wrong_owner_cpu},
{"held_lock_freed", lockdep_test_held_lock_freed}
};
/*
 * Run the named lockdep test case.  Usage: echo <name> > /proc/lockdep_test
 * (with or without a trailing newline).  Unknown names are silently ignored;
 * the write still counts as consumed.
 */
static ssize_t
lockdep_test_write(struct file *file, const char *ubuf,
		   size_t count, loff_t *ppos)
{
	int i;
	char buf[32];
	size_t len;

	if (count >= sizeof(buf) || count == 0)
		return -EINVAL;

	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;

	buf[count] = '\0';

	/*
	 * Strip the newline appended by "echo name" so that "echo -n name"
	 * works as well; the old code required exactly one trailing char.
	 */
	len = count;
	if (buf[len - 1] == '\n')
		buf[--len] = '\0';

	for (i = 0; i < ARRAY_SIZE(lockdep_test_list); i++) {
		if (strlen(lockdep_test_list[i].name) != len)
			continue;
		if (!strncmp(buf, lockdep_test_list[i].name, len))
			lockdep_test_list[i].func();
	}
	return count;
}
static const struct file_operations proc_lockdep_test_fops = {
.open = simple_open,
.write = lockdep_test_write,
};
/*
 * Initialize the test locks and register the write-only
 * /proc/lockdep_test entry (see lockdep_test_write for usage).
 * NOTE: lockD is deliberately NOT initialized here -- the
 * "uninitialized" and "bad_magic" cases rely on locking it without a
 * prior spin_lock_init().
 */
void lockdep_test_init(void)
{
	spin_lock_init(&lockA);
	spin_lock_init(&lockB);
	spin_lock_init(&lockC);
	mutex_init(&mutexA);
	init_rwsem(&rw_semA);
	proc_create("lockdep_test", 0220, NULL, &proc_lockdep_test_fops);
}

View File

@@ -16,6 +16,127 @@
#include "lockdep_internals.h"
static void spin_aee(const char *msg);
#if defined(MTK_DEBUG_SPINLOCK_V1) || defined(MTK_DEBUG_SPINLOCK_V2)
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#define MAX_LOCK_NAME 128
#define WARNING_TIME 1000000000 /* 1 seconds */
/*
 * Convert a (possibly negative, stored in unsigned) nanosecond value to
 * whole milliseconds, preserving the sign.
 */
static long long msec_high(unsigned long long nsec)
{
	int negative = (long long)nsec < 0;

	if (negative)
		nsec = -nsec;
	do_div(nsec, 1000000);

	return negative ? -(long long)nsec : (long long)nsec;
}
/*
 * Whole-seconds part of a nanosecond timestamp, sign preserved.
 * Pairs with sec_low() for "%lld.%06lu"-style printing.
 */
static long long sec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000000);
		return -nsec;
	}
	do_div(nsec, 1000000000);
	return nsec;
}
/*
 * Sub-second part of a nanosecond timestamp, returned in microseconds
 * (the do_div remainder is nanoseconds; /1000 drops to us resolution).
 */
static unsigned long sec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;
	/* exclude part of nsec */
	return do_div(nsec, 1000000000) / 1000;
}
/*
 * Fill @name with a printable identifier for @lock: the lockdep class
 * name when available, otherwise the symbolized lock address ("%ps").
 * @name must point to a buffer of at least MAX_LOCK_NAME bytes.
 */
static void get_spin_lock_name(raw_spinlock_t *lock, char *name)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	snprintf(name, MAX_LOCK_NAME, "%s", lock->dep_map.name);
#else
	snprintf(name, MAX_LOCK_NAME, "%ps", lock);
#endif
}
/*
 * Locks that the warning path itself depends on (kmalloc and
 * aee_kernel_warning_api take them); dumping/AEE while one of these is
 * involved could recurse, so callers skip the heavy reporting for them.
 * Without lockdep class names we cannot identify them and report false.
 */
static bool is_critical_spinlock(raw_spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	const char *name = lock->dep_map.name;

	if (!strcmp(name, "&(&n->list_lock)->rlock") ||
	    !strcmp(name, "depot_lock"))
		return true;
#endif
	return false;
}
#endif /* MTK_DEBUG_SPINLOCK_V1 || MTK_DEBUG_SPINLOCK_V2 */
#ifdef MTK_DEBUG_SPINLOCK_V2
/* Record the current sched_clock() value; taken just before spinning. */
static void spin_lock_get_timestamp(unsigned long long *ts)
{
	*ts = sched_clock();
}
/*
 * Report when the caller spun on @lock for longer than WARNING_TIME (1s).
 * @ts is the sched_clock() timestamp taken right before spinning started.
 */
static void spin_lock_check_spinning_time(raw_spinlock_t *lock,
					  unsigned long long ts)
{
	unsigned long long now = sched_clock();
	char lock_name[MAX_LOCK_NAME];

	if (now - ts <= WARNING_TIME)
		return;

	get_spin_lock_name(lock, lock_name);
	pr_info("spinning for (%s)(%p) from [%lld.%06lu] to [%lld.%06lu], total %llu ms\n",
		lock_name, lock,
		sec_high(ts), sec_low(ts),
		sec_high(now), sec_low(now),
		msec_high(now - ts));
}
/*
 * Warn when @lock was held longer than WARNING_TIME (1s).  Called from
 * debug_spin_unlock() after unlock_t has been stamped.  Skips the stack
 * dump and AEE warning for locks the reporting path itself needs.
 */
static void spin_lock_check_holding_time(raw_spinlock_t *lock)
{
	char lock_name[MAX_LOCK_NAME];

	/* check if holding time over 1 second */
	if (lock->unlock_t - lock->lock_t <= WARNING_TIME)
		return;

	get_spin_lock_name(lock, lock_name);
	pr_info("hold spinlock (%s)(%p) from [%lld.%06lu] to [%lld.%06lu], total %llu ms\n",
		lock_name, lock,
		sec_high(lock->lock_t), sec_low(lock->lock_t),
		sec_high(lock->unlock_t), sec_low(lock->unlock_t),
		msec_high(lock->unlock_t - lock->lock_t));

	if (is_critical_spinlock(lock) || is_critical_lock_held())
		return;

	pr_info("========== The call trace of lock owner on CPU%d ==========\n",
		raw_smp_processor_id());
	dump_stack();

#ifdef CONFIG_MTK_AEE_FEATURE
	{
		/*
		 * Scoped inside the #ifdef: the old unconditional
		 * declaration produced an unused-variable warning when
		 * CONFIG_MTK_AEE_FEATURE was disabled.
		 */
		char aee_str[128];

		snprintf(aee_str, sizeof(aee_str),
			 "Spinlock lockup: (%s) in %s\n",
			 lock_name, current->comm);
		aee_kernel_warning_api(__FILE__, __LINE__,
			DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
			aee_str, "spinlock debugger\n");
	}
#endif
}
#else /* MTK_DEBUG_SPINLOCK_V2 */
static inline void spin_lock_check_holding_time(raw_spinlock_t *lock)
{
}
#endif /* !MTK_DEBUG_SPINLOCK_V2 */
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -92,6 +213,7 @@ debug_spin_lock_before(raw_spinlock_t *lock)
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
lock->lock_t = sched_clock();
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
@@ -105,16 +227,193 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
lock, "wrong CPU");
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
lock->unlock_t = sched_clock();
spin_lock_check_holding_time(lock);
}
#ifdef MTK_DEBUG_SPINLOCK_V1
#define LOCK_CSD_IN_USE ((void *)-1L)
static DEFINE_PER_CPU(call_single_data_t, spinlock_debug_csd);
struct spinlock_debug_info {
int detector_cpu;
raw_spinlock_t lock;
};
static DEFINE_PER_CPU(struct spinlock_debug_info, sp_dbg) = {
-1, __RAW_SPIN_LOCK_UNLOCKED(sp_dbg.lock) };
/*
 * IPI callback run on the lock owner's CPU: dump that CPU's stack and,
 * unless @info is the LOCK_CSD_IN_USE sentinel, raise an AEE warning
 * with the lock name carried in @info (kmalloc'd by the detecting CPU,
 * freed here).  Finally release the per-cpu csd for the next request.
 */
static void show_cpu_backtrace(void *info)
{
	call_single_data_t *csd;

	pr_info("========== The call trace of lock owner on CPU%d ==========\n",
		raw_smp_processor_id());
	dump_stack();

	if (info != LOCK_CSD_IN_USE) {
#ifdef CONFIG_MTK_AEE_FEATURE
		char aee_str[128];

		snprintf(aee_str, sizeof(aee_str),
			 "Spinlock lockup: (%s) in %s\n",
			 (char *)info, current->comm);
		aee_kernel_warning_api(__FILE__, __LINE__,
			DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
			aee_str, "spinlock debugger\n");
#endif
		kfree(info);
	}
	/* mark the per-cpu csd free again */
	csd = this_cpu_ptr(&spinlock_debug_csd);
	csd->info = NULL;
}
/*
 * Despite the name, this checks whether @lock IS logbuf_lock (identified
 * by its lockdep class name), not whether it is currently held.  The
 * debug spin loop skips its own diagnostics for it, since that path
 * calls pr_info()/kmalloc() and could recurse.  Without lockdep class
 * names the lock cannot be identified and we report false.
 */
bool is_logbuf_lock_held(raw_spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return strcmp(lock->dep_map.name, "logbuf_lock") == 0;
#else
	return false;
#endif
}
/*
 * Contended slowpath for MTK_DEBUG_SPINLOCK_V1: spin on @lock, and once
 * per second (measured in cycles) report who holds it.  Every ~5 seconds
 * the owner's held locks are dumped, and the first time around the owner
 * CPU is IPI'd (show_cpu_backtrace) to dump its stack / raise AEE.
 * Returns only when the lock has been acquired.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 one_second = (u64)loops_per_jiffy * msecs_to_jiffies(1000);
	int owner_cpu = -1, target_cpu = -1;
	int curr_cpu = raw_smp_processor_id();
	int print_once = 1, cnt = 0;
	int is_warning_owner = 0;
	char lock_name[MAX_LOCK_NAME];
	unsigned long long t1, t2, t3;
	struct task_struct *owner = NULL;
	cycles_t exit_cycles;

	/* skip debugging for printk's own lock to avoid recursion */
	if (is_logbuf_lock_held(lock)) {
		arch_spin_lock(&lock->raw_lock);
		return;
	}

	t1 = sched_clock();
	t2 = t1;
	exit_cycles = get_cycles() + one_second;

	for (;;) {
		/* spin for ~1 second, trying the lock the whole time */
		while (get_cycles() < exit_cycles) {
			if (arch_spin_trylock(&lock->raw_lock)) {
				if (is_warning_owner) {
					struct spinlock_debug_info *sdi;

					sdi = per_cpu_ptr(&sp_dbg, target_cpu);
					sdi->detector_cpu = -1;
				}
				return;
			}
		}
		exit_cycles += one_second;

		t3 = sched_clock();
		if (t3 < t2)
			continue;
		t2 = t3;

		owner = lock->owner;
		owner_cpu = lock->owner_cpu;

		/* lock is already released */
		if (owner == SPINLOCK_OWNER_INIT || owner_cpu == -1)
			continue;

		get_spin_lock_name(lock, lock_name);
		pr_info("(%s)(%p) spin time: %llu ms(from %lld.%06lu), raw_lock: 0x%08x, magic: %08x, held by %s/%d on CPU#%d(from %lld.%06lu)\n",
			lock_name, lock,
			msec_high(t2 - t1), sec_high(t1), sec_low(t1),
			*((unsigned int *)&lock->raw_lock), lock->magic,
			owner->comm, task_pid_nr(owner), owner_cpu,
			sec_high(lock->lock_t), sec_low(lock->lock_t));

		/* print held lock information per 5 sec */
		if (cnt == 0) {
			struct spinlock_debug_info *sdi;

			sdi = per_cpu_ptr(&sp_dbg, owner_cpu);
			if (sdi->detector_cpu == -1 &&
			    raw_spin_trylock(&sdi->lock)) {
				is_warning_owner = 1;
				sdi->detector_cpu = curr_cpu;
				target_cpu = owner_cpu;
				raw_spin_unlock(&sdi->lock);
			}
			if (sdi->detector_cpu == curr_cpu)
				debug_show_held_locks(owner);
		}
		/*
		 * Advance the 5-second cycle counter.  The original
		 * "cnt = (++cnt == 5) ? 0 : cnt;" modified cnt twice
		 * without a sequence point -- undefined behavior in C.
		 */
		if (++cnt == 5)
			cnt = 0;

		if (oops_in_progress != 0)
			/* in exception follow, pr_xxx maybe spinlock error */
			continue;

		/* warn the owner at most once */
		if (!print_once || !is_warning_owner)
			continue;
		print_once = 0;

		if (owner_cpu != curr_cpu) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(&spinlock_debug_csd, owner_cpu);

			/* already warned by another cpu */
			if (csd->info)
				continue;

			/* mark csd is in use */
			csd->info = LOCK_CSD_IN_USE;
			csd->func = show_cpu_backtrace;
			csd->flags = 0;

			if (!is_critical_spinlock(lock) &&
			    !is_critical_lock_held()) {
				csd->info = kmalloc(MAX_LOCK_NAME, GFP_ATOMIC);
				if (!csd->info) {
					/* retry the warning next round */
					print_once = 1;
					continue;
				}
				strncpy(csd->info, lock_name, MAX_LOCK_NAME);
			}
			smp_call_function_single_async(owner_cpu, csd);
		} else {
			pr_info("(%s) recursive deadlock on CPU%d\n",
				lock_name, owner_cpu);
		}
	}
}
#endif /* MTK_DEBUG_SPINLOCK_V1 */
/*
* We are now relying on the NMI watchdog to detect lockup instead of doing
* the detection here with an unfair lock which can cause problem of its own.
*/
/*
 * Debug entry point for raw spinlock acquisition.
 * V1: try the fast path, fall into __spin_lock_debug() on contention.
 * V2: time the spin and report via spin_lock_check_spinning_time().
 * Otherwise: plain arch_spin_lock().
 * debug_spin_lock_before/after perform the magic/owner sanity checks and
 * stamp lock_t.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
#ifdef MTK_DEBUG_SPINLOCK_V2
	unsigned long long ts = 0;
#endif
	debug_spin_lock_before(lock);
#ifdef MTK_DEBUG_SPINLOCK_V1
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
#elif defined(MTK_DEBUG_SPINLOCK_V2)
	spin_lock_get_timestamp(&ts);
	arch_spin_lock(&lock->raw_lock);
	spin_lock_check_spinning_time(lock, ts);
#else
	arch_spin_lock(&lock->raw_lock);
#endif
	debug_spin_lock_after(lock);
}

View File

@@ -1389,6 +1389,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
check_held_locks(1);
pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
print_cpu_stall_info_begin();
rcu_for_each_leaf_node(rsp, rnp) {
@@ -1460,6 +1461,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
check_held_locks(1);
pr_err("INFO: %s self-detected stall on CPU", rsp->name);
print_cpu_stall_info_begin();
raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);

View File

@@ -1252,7 +1252,7 @@ config WW_MUTEX_SELFTEST
config MTK_LOCKING_AEE
bool "AEE warning for locking"
depends on MTK_AEE_FEATURE
depends on MTK_AEE_FEATURE && (LOCKDEP || DEBUG_SPINLOCK)
help
This feature will trigger kernel API dump when lock warnings
occur in runtime lock operations. For some specific cases,

View File

@@ -14,6 +14,11 @@
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#ifdef CONFIG_LOCKDEP
#include <linux/sched/clock.h>
unsigned long long debug_locks_off_ts;
#endif
/*
* We want to turn all lock-debugging facilities on/off at once,
* via a global flag. The reason is that once a single bug has been
@@ -38,6 +43,9 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
int debug_locks_off(void)
{
if (debug_locks && __debug_locks_off()) {
#ifdef CONFIG_LOCKDEP
debug_locks_off_ts = sched_clock();
#endif
if (!debug_locks_silent) {
console_verbose();
return 1;