Skip to content

Commit 40aa85b

Browse files
author
Fox Snowpatch
committed
1 parent 182544a commit 40aa85b

11 files changed

Lines changed: 114 additions & 70 deletions

File tree

arch/arm/mm/fault.c

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -349,6 +349,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
349349
if (!(flags & FAULT_FLAG_USER))
350350
goto lock_mmap;
351351

352+
retry_vma:
352353
vma = lock_vma_under_rcu(mm, addr);
353354
if (!vma)
354355
goto lock_mmap;
@@ -378,6 +379,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
378379
goto no_context;
379380
return 0;
380381
}
382+
383+
/* If the first try is only about waiting for the I/O to complete */
384+
if (fault & VM_FAULT_RETRY_VMA)
385+
goto retry_vma;
381386
lock_mmap:
382387

383388
retry:

arch/arm64/mm/fault.c

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -626,6 +626,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
626626
if (!(mm_flags & FAULT_FLAG_USER))
627627
goto lock_mmap;
628628

629+
retry_vma:
629630
vma = lock_vma_under_rcu(mm, addr);
630631
if (!vma)
631632
goto lock_mmap;
@@ -672,6 +673,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
672673
goto no_context;
673674
return 0;
674675
}
676+
677+
/* If the first try is only about waiting for the I/O to complete */
678+
if (fault & VM_FAULT_RETRY_VMA)
679+
goto retry_vma;
675680
lock_mmap:
676681

677682
retry:

arch/loongarch/mm/fault.c

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -219,6 +219,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
219219
if (!(flags & FAULT_FLAG_USER))
220220
goto lock_mmap;
221221

222+
retry_vma:
222223
vma = lock_vma_under_rcu(mm, address);
223224
if (!vma)
224225
goto lock_mmap;
@@ -265,6 +266,9 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
265266
no_context(regs, write, address);
266267
return;
267268
}
269+
/* If the first try is only about waiting for the I/O to complete */
270+
if (fault & VM_FAULT_RETRY_VMA)
271+
goto retry_vma;
268272
lock_mmap:
269273

270274
retry:

arch/powerpc/mm/fault.c

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -487,6 +487,7 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
487487
if (!(flags & FAULT_FLAG_USER))
488488
goto lock_mmap;
489489

490+
retry_vma:
490491
vma = lock_vma_under_rcu(mm, address);
491492
if (!vma)
492493
goto lock_mmap;
@@ -516,7 +517,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
516517

517518
if (fault_signal_pending(fault, regs))
518519
return user_mode(regs) ? 0 : SIGBUS;
519-
520+
/* If the first try is only about waiting for the I/O to complete */
521+
if (fault & VM_FAULT_RETRY_VMA)
522+
goto retry_vma;
520523
lock_mmap:
521524

522525
/* When running in the kernel we expect faults to occur only to

arch/riscv/mm/fault.c

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -347,6 +347,7 @@ void handle_page_fault(struct pt_regs *regs)
347347
if (!(flags & FAULT_FLAG_USER))
348348
goto lock_mmap;
349349

350+
retry_vma:
350351
vma = lock_vma_under_rcu(mm, addr);
351352
if (!vma)
352353
goto lock_mmap;
@@ -376,6 +377,9 @@ void handle_page_fault(struct pt_regs *regs)
376377
no_context(regs, addr);
377378
return;
378379
}
380+
/* If the first try is only about waiting for the I/O to complete */
381+
if (fault & VM_FAULT_RETRY_VMA)
382+
goto retry_vma;
379383
lock_mmap:
380384

381385
retry:

arch/s390/mm/fault.c

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -294,6 +294,7 @@ static void do_exception(struct pt_regs *regs, int access)
294294
flags |= FAULT_FLAG_WRITE;
295295
if (!(flags & FAULT_FLAG_USER))
296296
goto lock_mmap;
297+
retry_vma:
297298
vma = lock_vma_under_rcu(mm, address);
298299
if (!vma)
299300
goto lock_mmap;
@@ -318,6 +319,9 @@ static void do_exception(struct pt_regs *regs, int access)
318319
handle_fault_error_nolock(regs, 0);
319320
return;
320321
}
322+
/* If the first try is only about waiting for the I/O to complete */
323+
if (fault & VM_FAULT_RETRY_VMA)
324+
goto retry_vma;
321325
lock_mmap:
322326
retry:
323327
vma = lock_mm_and_find_vma(mm, address, regs);

arch/x86/mm/fault.c

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1322,6 +1322,7 @@ void do_user_addr_fault(struct pt_regs *regs,
13221322
if (!(flags & FAULT_FLAG_USER))
13231323
goto lock_mmap;
13241324

1325+
retry_vma:
13251326
vma = lock_vma_under_rcu(mm, address);
13261327
if (!vma)
13271328
goto lock_mmap;
@@ -1351,6 +1352,9 @@ void do_user_addr_fault(struct pt_regs *regs,
13511352
ARCH_DEFAULT_PKEY);
13521353
return;
13531354
}
1355+
/* If the first try is only about waiting for the I/O to complete */
1356+
if (fault & VM_FAULT_RETRY_VMA)
1357+
goto retry_vma;
13541358
lock_mmap:
13551359

13561360
retry:

include/linux/mm_types.h

Lines changed: 5 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1625,10 +1625,11 @@ enum vm_fault_reason {
16251625
VM_FAULT_NOPAGE = (__force vm_fault_t)0x000100,
16261626
VM_FAULT_LOCKED = (__force vm_fault_t)0x000200,
16271627
VM_FAULT_RETRY = (__force vm_fault_t)0x000400,
1628-
VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
1629-
VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
1630-
VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
1631-
VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
1628+
VM_FAULT_RETRY_VMA = (__force vm_fault_t)0x000800,
1629+
VM_FAULT_FALLBACK = (__force vm_fault_t)0x001000,
1630+
VM_FAULT_DONE_COW = (__force vm_fault_t)0x002000,
1631+
VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x004000,
1632+
VM_FAULT_COMPLETED = (__force vm_fault_t)0x008000,
16321633
VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
16331634
};
16341635

include/linux/pagemap.h

Lines changed: 0 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -1101,7 +1101,6 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
11011101

11021102
void __folio_lock(struct folio *folio);
11031103
int __folio_lock_killable(struct folio *folio);
1104-
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
11051104
void unlock_page(struct page *page);
11061105
void folio_unlock(struct folio *folio);
11071106

@@ -1198,22 +1197,6 @@ static inline int folio_lock_killable(struct folio *folio)
11981197
return 0;
11991198
}
12001199

1201-
/*
1202-
* folio_lock_or_retry - Lock the folio, unless this would block and the
1203-
* caller indicated that it can handle a retry.
1204-
*
1205-
* Return value and mmap_lock implications depend on flags; see
1206-
* __folio_lock_or_retry().
1207-
*/
1208-
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
1209-
struct vm_fault *vmf)
1210-
{
1211-
might_sleep();
1212-
if (!folio_trylock(folio))
1213-
return __folio_lock_or_retry(folio, vmf);
1214-
return 0;
1215-
}
1216-
12171200
/*
12181201
* This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
12191202
* and should not be used directly.

mm/filemap.c

Lines changed: 11 additions & 46 deletions
Original file line number | Diff line number | Diff line change
@@ -1739,51 +1739,6 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
17391739
return ret;
17401740
}
17411741

1742-
/*
1743-
* Return values:
1744-
* 0 - folio is locked.
1745-
* non-zero - folio is not locked.
1746-
* mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
1747-
* vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
1748-
* FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
1749-
*
1750-
* If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
1751-
* with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1752-
*/
1753-
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1754-
{
1755-
unsigned int flags = vmf->flags;
1756-
1757-
if (fault_flag_allow_retry_first(flags)) {
1758-
/*
1759-
* CAUTION! In this case, mmap_lock/per-VMA lock is not
1760-
* released even though returning VM_FAULT_RETRY.
1761-
*/
1762-
if (flags & FAULT_FLAG_RETRY_NOWAIT)
1763-
return VM_FAULT_RETRY;
1764-
1765-
release_fault_lock(vmf);
1766-
if (flags & FAULT_FLAG_KILLABLE)
1767-
folio_wait_locked_killable(folio);
1768-
else
1769-
folio_wait_locked(folio);
1770-
return VM_FAULT_RETRY;
1771-
}
1772-
if (flags & FAULT_FLAG_KILLABLE) {
1773-
bool ret;
1774-
1775-
ret = __folio_lock_killable(folio);
1776-
if (ret) {
1777-
release_fault_lock(vmf);
1778-
return VM_FAULT_RETRY;
1779-
}
1780-
} else {
1781-
__folio_lock(folio);
1782-
}
1783-
1784-
return 0;
1785-
}
1786-
17871742
/**
17881743
* page_cache_next_miss() - Find the next gap in the page cache.
17891744
* @mapping: Mapping.
@@ -3520,6 +3475,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
35203475
struct folio *folio;
35213476
vm_fault_t ret = 0;
35223477
bool mapping_locked = false;
3478+
bool retry_by_vma_lock = false;
35233479

35243480
max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
35253481
if (unlikely(index >= max_idx))
@@ -3572,6 +3528,13 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
35723528
}
35733529
}
35743530

3531+
/*
3532+
* If the folio is uptodate, we are likely only waiting for
3533+
* another concurrent PTE mapping to complete, which should
3534+
* be brief. No need to drop the lock and retry the fault.
3535+
*/
3536+
if (folio_test_uptodate(folio))
3537+
vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
35753538
if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
35763539
goto out_retry;
35773540

@@ -3616,6 +3579,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
36163579
*/
36173580
if (fpin) {
36183581
folio_unlock(folio);
3582+
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3583+
retry_by_vma_lock = true;
36193584
goto out_retry;
36203585
}
36213586
if (mapping_locked)
@@ -3666,7 +3631,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
36663631
filemap_invalidate_unlock_shared(mapping);
36673632
if (fpin)
36683633
fput(fpin);
3669-
return ret | VM_FAULT_RETRY;
3634+
return ret | VM_FAULT_RETRY | (retry_by_vma_lock ? VM_FAULT_RETRY_VMA : 0);
36703635
}
36713636
EXPORT_SYMBOL(filemap_fault);
36723637

0 commit comments

Comments (0)