FROMLIST: mm: add mmu_notifier_lock

Introduce mmu_notifier_lock as a per-mm percpu_rw_semaphore,
as well as the code to initialize and destroy it together with the mm.

This lock will be used to prevent races between mmu_notifier_register()
and speculative fault handlers that need to fire MMU notifications
without holding any of the mmap or rmap locks.

Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
Link: https://lore.kernel.org/all/20220128131006.67712-24-michel@lespinasse.org/
Bug: 161210518
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I453ebe979c8b9dcc6159b41c5ec7a1ea17d85ee2
Commit 1ae855f191 (parent 3f4fefc1a4), authored by Michel Lespinasse on 2022-01-24 17:43:55 -08:00 and committed by Todd Kjos. 3 changed files with 32 additions and 4 deletions.

File: include/linux/mm_types.h

@@ -425,6 +425,7 @@ struct core_state {
};
struct kioctx_table;
struct percpu_rw_semaphore;
struct mm_struct {
struct {
struct vm_area_struct *mmap; /* list of VMAs */
@@ -573,7 +574,10 @@ struct mm_struct {
struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
struct percpu_rw_semaphore *mmu_notifier_lock;
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
#endif /* CONFIG_MMU_NOTIFIER */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif

File: include/linux/mmu_notifier.h

@@ -6,6 +6,8 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/percpu-rwsem.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
@@ -499,15 +501,35 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
__mmu_notifier_invalidate_range(mm, start, end);
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
/*
 * Set up the per-mm MMU notifier state at mm creation time.
 *
 * Returns true on success, false on allocation/initialization failure
 * (callers such as mm_init() then abandon the new mm).
 *
 * With CONFIG_SPECULATIVE_PAGE_FAULT, also allocates and initializes
 * mm->mmu_notifier_lock, a percpu_rw_semaphore used to serialize
 * mmu_notifier_register() against speculative fault handlers that fire
 * MMU notifications without holding the mmap or rmap locks (see commit
 * message above).
 */
static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
mm->mmu_notifier_lock = kzalloc(sizeof(struct percpu_rw_semaphore), GFP_KERNEL);
if (!mm->mmu_notifier_lock)
return false;
if (percpu_init_rwsem(mm->mmu_notifier_lock)) {
/*
 * NOTE(review): mm->mmu_notifier_lock is left pointing at freed
 * memory here; safe today only because the failing mm is torn
 * down by the caller — confirm no path reads it afterwards.
 */
kfree(mm->mmu_notifier_lock);
return false;
}
#endif
/* No subscribers until mmu_notifier_register() is called. */
mm->notifier_subscriptions = NULL;
return true;
}
/*
 * Tear down the per-mm MMU notifier state when the mm is destroyed.
 *
 * With CONFIG_SPECULATIVE_PAGE_FAULT, also releases mmu_notifier_lock.
 * When invoked from atomic context, the semaphore cannot be freed
 * synchronously — presumably because percpu_free_rwsem() may sleep —
 * so destruction is deferred via percpu_rwsem_async_destroy().
 * NOTE(review): in_atomic() is unreliable without CONFIG_PREEMPT_COUNT;
 * confirm this config always has preempt counting enabled.
 */
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
__mmu_notifier_subscriptions_destroy(mm);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
if (!in_atomic()) {
percpu_free_rwsem(mm->mmu_notifier_lock);
kfree(mm->mmu_notifier_lock);
} else {
percpu_rwsem_async_destroy(mm->mmu_notifier_lock);
}
#endif
}
@@ -724,8 +746,9 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
{
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
/* !CONFIG_MMU_NOTIFIER stub: nothing to allocate, always succeeds. */
static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
return true;
}
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)

File: kernel/fork.c

@@ -1084,7 +1084,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_init_owner(mm, p);
mm_init_pasid(mm);
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_subscriptions_init(mm);
if (!mmu_notifier_subscriptions_init(mm))
goto fail_nopgd;
init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
mm->pmd_huge_pte = NULL;