Revert "ANDROID: Make SPF aware of fast mremaps"
This reverts commit af027c97fc.
Reason for revert: vts_linux_kselftest_arm_64 timeout
Bug: 263479421
Bug: 263177905
Change-Id: I123c56741c982d1539ceebd8bfde2443871aa1de
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
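
The reverted change paired a per-VMA reference count with a wait queue so that a fast mremap could exclude in-flight speculative page faults; the hunks below remove that handshake again. As a rough, self-contained illustration of the protocol (not ACK code), here is a minimal userspace model in which C11 atomics and a pthread condition variable stand in for the kernel's atomic_t and vma_users_wait. All names in the sketch (vma_ref, waiters, get_ref, put_ref, wait_for_users, restore_ref) are invented for illustration; only the roles mirror get_vma()/put_vma() and wait_for_vma_users()/restore_vma_ref_count() from the diff.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int vma_ref;      /* plays vma->file_ref_count: >0 readers, -1 mremap */
static atomic_int waiters;      /* plays vma_user_waiters */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;   /* plays vma_users_wait */

/* Fault side of get_vma(): take a reference unless a fast mremap owns the VMA. */
static bool get_ref(void)
{
        int v = atomic_load(&vma_ref);

        while (v >= 0) {                /* i.e. atomic_inc_unless_negative() */
                if (atomic_compare_exchange_weak(&vma_ref, &v, v + 1))
                        return true;
        }
        return false;                   /* concurrent fast mremap: caller falls back */
}

/* Fault side of put_vma(): drop the reference, wake the mremap waiter if last. */
static void put_ref(void)
{
        if (atomic_fetch_sub(&vma_ref, 1) == 1 && atomic_load(&waiters) > 0) {
                pthread_mutex_lock(&lock);
                pthread_cond_broadcast(&wq);
                pthread_mutex_unlock(&lock);
        }
}

/* mremap side of wait_for_vma_users(): flip the count to -1 once no reader holds it. */
static void wait_for_users(void)
{
        int zero = 0;

        if (atomic_compare_exchange_strong(&vma_ref, &zero, -1))
                return;                 /* fast path: we were the only user */

        atomic_fetch_add(&waiters, 1);
        pthread_mutex_lock(&lock);
        for (zero = 0; !atomic_compare_exchange_strong(&vma_ref, &zero, -1); zero = 0)
                pthread_cond_wait(&wq, &lock);
        pthread_mutex_unlock(&lock);
        atomic_fetch_sub(&waiters, 1);
}

/* mremap side of restore_vma_ref_count(): hand the VMA back to fault handlers. */
static void restore_ref(void)
{
        atomic_store(&vma_ref, 0);
}
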
@@ -3380,8 +3380,6 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
 #ifdef CONFIG_MMU
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-extern wait_queue_head_t vma_users_wait;
-extern atomic_t vma_user_waiters;
 
 bool __pte_map_lock(struct vm_fault *vmf);

mm/memory.c (28 changed lines)

@@ -217,23 +217,6 @@ struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
         rcu_read_lock();
         vma = find_vma_from_tree(mm, addr);
-
-        /*
-         * atomic_inc_unless_negative() also protects from races with
-         * fast mremap.
-         *
-         * If there is a concurrent fast mremap, bail out since the entire
-         * PMD/PUD subtree may have been remapped.
-         *
-         * This is usually safe for conventional mremap since it takes the
-         * PTE locks as does SPF. However, fast mremap only takes the lock
-         * at the PMD/PUD level, which is OK as it is done with the mmap
-         * write lock held. But since SPF, as the term implies, forgoes
-         * taking the mmap read lock and also cannot take the PTL at the
-         * larger PMD/PUD granularity, since that would introduce huge
-         * contention in the page fault path, fall back to regular fault
-         * handling.
-         */
         if (vma) {
                 if (vma->vm_start > addr ||
                     !atomic_inc_unless_negative(&vma->file_ref_count))
@@ -249,16 +232,7 @@ void put_vma(struct vm_area_struct *vma)
         int new_ref_count;
 
         new_ref_count = atomic_dec_return(&vma->file_ref_count);
-
-        /*
-         * Implicit smp_mb due to atomic_dec_return.
-         *
-         * If this is the last reference, wake up the mremap waiter
-         * (if any).
-         */
-        if (new_ref_count == 0 && unlikely(atomic_read(&vma_user_waiters) > 0))
-                wake_up(&vma_users_wait);
-        else if (new_ref_count < 0)
+        if (new_ref_count < 0)
                 vm_area_free_no_check(vma);
 }

mm/mremap.c (106 changed lines)

@@ -219,77 +219,17 @@ static inline bool arch_supports_page_table_move(void)
 }
 #endif
 
-#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-DECLARE_WAIT_QUEUE_HEAD(vma_users_wait);
-atomic_t vma_user_waiters = ATOMIC_INIT(0);
-
-static inline void wait_for_vma_users(struct vm_area_struct *vma)
-{
-        /*
-         * If we have the only reference, swap the refcount to -1. This
-         * will prevent other concurrent references by get_vma() for SPFs.
-         */
-        if (likely(atomic_cmpxchg(&vma->file_ref_count, 0, -1) == 0))
-                return;
-
-        /* Indicate we are waiting for other users of the VMA to finish. */
-        atomic_inc(&vma_user_waiters);
-
-        /* Failed atomic_cmpxchg; no implicit barrier, use an explicit one. */
-        smp_mb();
-
-        /*
-         * Callers cannot handle failure; sleep uninterruptibly until there
-         * are no other users of this VMA.
-         *
-         * We don't need to worry about references from concurrent waiters,
-         * since this is only used in the context of fast mremaps, with the
-         * exclusive mmap write lock held.
-         */
-        wait_event(vma_users_wait, atomic_cmpxchg(&vma->file_ref_count, 0, -1) == 0);
-
-        atomic_dec(&vma_user_waiters);
-}
-
-/*
- * Restore the VMA reference count to 0 after a fast mremap.
- */
-static inline void restore_vma_ref_count(struct vm_area_struct *vma)
-{
-        /*
-         * This should only be called after a corresponding
-         * wait_for_vma_users().
-         */
-        VM_BUG_ON_VMA(atomic_cmpxchg(&vma->file_ref_count, -1, 0) != -1,
-                      vma);
-}
-#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
-static inline void wait_for_vma_users(struct vm_area_struct *vma)
-{
-}
-static inline void restore_vma_ref_count(struct vm_area_struct *vma)
-{
-}
-#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
-
-#ifdef CONFIG_HAVE_MOVE_PMD
+/*
+ * Speculative page fault handlers will not detect page table changes done
+ * without ptl locking.
+ */
+#if defined(CONFIG_HAVE_MOVE_PMD) && !defined(CONFIG_SPECULATIVE_PAGE_FAULT)
 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
         spinlock_t *old_ptl, *new_ptl;
         struct mm_struct *mm = vma->vm_mm;
         pmd_t pmd;
-        bool ret;
-
-        /*
-         * Wait for concurrent users, since these can potentially be
-         * speculative page faults.
-         */
-        wait_for_vma_users(vma);
 
         if (!arch_supports_page_table_move())
                 return false;
@@ -316,10 +256,8 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
          * One alternative might be to just unmap the target pmd at
          * this point, and verify that it really is empty. We'll see.
          */
-        if (WARN_ON_ONCE(!pmd_none(*new_pmd))) {
-                ret = false;
-                goto out;
-        }
+        if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
+                return false;
 
         /*
          * We don't have to worry about the ordering of src and dst
@@ -342,11 +280,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
         spin_unlock(new_ptl);
         spin_unlock(old_ptl);
 
-        ret = true;
-
-out:
-        restore_vma_ref_count(vma);
-        return ret;
+        return true;
 }
 #else
 static inline bool move_normal_pmd(struct vm_area_struct *vma,
@@ -357,33 +291,27 @@ static inline bool move_normal_pmd(struct vm_area_struct *vma,
 }
 #endif
 
-#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
+/*
+ * Speculative page fault handlers will not detect page table changes done
+ * without ptl locking.
+ */
+#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD) && \
+        !defined(CONFIG_SPECULATIVE_PAGE_FAULT)
 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
                 unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
 {
         spinlock_t *old_ptl, *new_ptl;
         struct mm_struct *mm = vma->vm_mm;
         pud_t pud;
-        bool ret;
-
-        /*
-         * Wait for concurrent users, since these can potentially be
-         * speculative page faults.
-         */
-        wait_for_vma_users(vma);
 
-        if (!arch_supports_page_table_move()) {
-                ret = false;
-                goto out;
-        }
+        if (!arch_supports_page_table_move())
+                return false;
         /*
          * The destination pud shouldn't be established, free_pgtables()
          * should have released it.
          */
-        if (WARN_ON_ONCE(!pud_none(*new_pud))) {
-                ret = false;
-                goto out;
-        }
+        if (WARN_ON_ONCE(!pud_none(*new_pud)))
+                return false;
 
         /*
          * We don't have to worry about the ordering of src and dst
@@ -406,11 +334,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
         spin_unlock(new_ptl);
         spin_unlock(old_ptl);
 
-        ret = true;
-
-out:
-        restore_vma_ref_count(vma);
-        return ret;
+        return true;
 }
 #else
 static inline bool move_normal_pud(struct vm_area_struct *vma,
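
For completeness, a tiny driver that exercises both halves of the userspace sketch at the top of this page (append it to that file and build with cc -pthread); the two roles mirror the SPF reader shown in the mm/memory.c hunks and the fast-mremap waiter shown in the mm/mremap.c hunks. The names spf_reader and the printed message are illustrative only; nothing here is kernel code.

#include <stdio.h>

static void *spf_reader(void *arg)
{
        (void)arg;
        if (get_ref()) {
                /* a speculative fault would walk the page tables here */
                put_ref();
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, spf_reader, NULL);

        wait_for_users();   /* fast mremap: exclude and wait out any reader */
        /* the PMD/PUD-level move would happen here, under the mmap write lock */
        restore_ref();

        pthread_join(t, NULL);
        puts("handshake complete");
        return 0;
}
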