Revert "mm: VMA sequence count"

This reverts commit 184ccc18de.

Bug: 128240262
Change-Id: I7293939d08e6b9603d82464f4538a573b3407a88
Signed-off-by: Minchan Kim <minchan@google.com>
This commit is contained in:
Minchan Kim
2019-03-11 11:42:15 +09:00
committed by basamaryan
parent 1fe3771a2c
commit f005ca2488
4 changed files with 0 additions and 80 deletions

View File

@@ -1373,9 +1373,6 @@ struct zap_details {
/*
 * INIT_VMA - perform the common initialization of a newly allocated
 * vm_area_struct: set up its anon_vma_chain list head and, when the
 * speculative page fault feature is configured, initialize the
 * per-VMA sequence counter.
 *
 * NOTE(review): this hunk is part of a revert diff — the seqcount_init
 * lines under CONFIG_SPECULATIVE_PAGE_FAULT are the ones being removed.
 */
static inline void INIT_VMA(struct vm_area_struct *vma)
{
INIT_LIST_HEAD(&vma->anon_vma_chain);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
/* Seqcount read by the speculative fault path to detect concurrent
 * modification of this VMA while it is inspected without mmap_sem. */
seqcount_init(&vma->vm_sequence);
#endif
}
struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1460,47 +1457,6 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
/*
 * Wrappers around the seqcount write-side API for vma->vm_sequence.
 * Writers bump the sequence around VMA modifications so that the
 * speculative page fault path can detect a concurrent change and
 * fall back to the classic, mmap_sem-protected path.
 *
 * NOTE(review): this hunk is part of a revert diff — the whole
 * CONFIG_SPECULATIVE_PAGE_FAULT block is being removed.
 */
/* Enter a write-side critical section on the VMA sequence count. */
static inline void vm_write_begin(struct vm_area_struct *vma)
{
write_seqcount_begin(&vma->vm_sequence);
}
/* As vm_write_begin(), but with a lockdep nesting subclass for the
 * cases where two VMAs' sequence counts are held at once. */
static inline void vm_write_begin_nested(struct vm_area_struct *vma,
int subclass)
{
write_seqcount_begin_nested(&vma->vm_sequence, subclass);
}
/* Leave the write-side critical section opened by vm_write_begin(). */
static inline void vm_write_end(struct vm_area_struct *vma)
{
write_seqcount_end(&vma->vm_sequence);
}
/* Raw variant: bump the sequence without lockdep instrumentation,
 * used where lockdep would report a false-positive dependency
 * (see the comment in __vma_adjust()). */
static inline void vm_raw_write_begin(struct vm_area_struct *vma)
{
raw_write_seqcount_begin(&vma->vm_sequence);
}
/* Raw counterpart of vm_write_end(). */
static inline void vm_raw_write_end(struct vm_area_struct *vma)
{
raw_write_seqcount_end(&vma->vm_sequence);
}
#else
/* CONFIG_SPECULATIVE_PAGE_FAULT=n: all wrappers compile to nothing. */
static inline void vm_write_begin(struct vm_area_struct *vma)
{
}
static inline void vm_write_begin_nested(struct vm_area_struct *vma,
int subclass)
{
}
static inline void vm_write_end(struct vm_area_struct *vma)
{
}
static inline void vm_raw_write_begin(struct vm_area_struct *vma)
{
}
static inline void vm_raw_write_end(struct vm_area_struct *vma)
{
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);

View File

@@ -349,9 +349,6 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
seqcount_t vm_sequence;
#endif
} __randomize_layout;
struct core_thread {

View File

@@ -1552,7 +1552,6 @@ void unmap_page_range(struct mmu_gather *tlb,
unsigned long next;
BUG_ON(addr >= end);
vm_write_begin(vma);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
@@ -1562,7 +1561,6 @@ void unmap_page_range(struct mmu_gather *tlb,
next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
vm_write_end(vma);
}

View File

@@ -687,30 +687,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
long adjust_next = 0;
int remove_next = 0;
/*
* Why using vm_raw_write*() functions here to avoid lockdep's warning ?
*
* Lockdep is complaining about a theoretical lock dependency, involving
* 3 locks:
* mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
*
* Here are the major path leading to this dependency :
* 1. __vma_adjust() mmap_sem -> vm_sequence -> i_mmap_rwsem
* 2. move_vmap() mmap_sem -> vm_sequence -> fs_reclaim
* 3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem
* 4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence
*
* So there is no way to solve this easily, especially because in
* unmap_mapping_range() the i_mmap_rwsem is grabbed while the impacted
* VMAs are not yet known.
* However, the way the vm_seq is used is guaranteeing that we will
* never block on it since we just check for its value and never wait
* for it to move, see vma_has_changed() and handle_speculative_fault().
*/
vm_raw_write_begin(vma);
if (next)
vm_raw_write_begin(next);
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
@@ -921,7 +897,6 @@ again:
anon_vma_merge(vma, next);
mm->map_count--;
mpol_put(vma_policy(next));
vm_raw_write_end(next);
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
@@ -936,8 +911,6 @@ again:
* "vma->vm_next" gap must be updated.
*/
next = vma->vm_next;
if (next)
vm_raw_write_begin(next);
} else {
/*
* For the scope of the comment "next" and
@@ -984,10 +957,6 @@ again:
if (insert && file)
uprobe_mmap(insert);
if (next && next != vma)
vm_raw_write_end(next);
vm_raw_write_end(vma);
validate_mm(mm);
return 0;