@@ -1739,51 +1739,6 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
 	return ret;
 }
 
-/*
- * Return values:
- * 0 - folio is locked.
- * non-zero - folio is not locked.
- *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
- *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
- *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
- *
- * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
- * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
- */
-vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
-{
-	unsigned int flags = vmf->flags;
-
-	if (fault_flag_allow_retry_first(flags)) {
-		/*
-		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
-		 * released even though returning VM_FAULT_RETRY.
-		 */
-		if (flags & FAULT_FLAG_RETRY_NOWAIT)
-			return VM_FAULT_RETRY;
-
-		release_fault_lock(vmf);
-		if (flags & FAULT_FLAG_KILLABLE)
-			folio_wait_locked_killable(folio);
-		else
-			folio_wait_locked(folio);
-		return VM_FAULT_RETRY;
-	}
-	if (flags & FAULT_FLAG_KILLABLE) {
-		bool ret;
-
-		ret = __folio_lock_killable(folio);
-		if (ret) {
-			release_fault_lock(vmf);
-			return VM_FAULT_RETRY;
-		}
-	} else {
-		__folio_lock(folio);
-	}
-
-	return 0;
-}
-
 /**
  * page_cache_next_miss() - Find the next gap in the page cache.
  * @mapping: Mapping.
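Note: the first branch of the removed __folio_lock_or_retry() hinges on fault_flag_allow_retry_first(). For reference, in recent mainline kernels that helper (include/linux/mm.h) is roughly the following, i.e. a fault only qualifies for the drop-the-lock-and-retry path on its first attempt:

static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
	/* Retry is allowed, and this is the first (not yet retried) attempt. */
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	       (!(flags & FAULT_FLAG_TRIED));
}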
@@ -3520,6 +3475,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	struct folio *folio;
 	vm_fault_t ret = 0;
 	bool mapping_locked = false;
+	bool retry_by_vma_lock = false;
 
 	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (unlikely(index >= max_idx))
@@ -3572,6 +3528,13 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 		}
 	}
 
+	/*
+	 * If the folio is uptodate, we are likely only waiting for
+	 * another concurrent PTE mapping to complete, which should
+	 * be brief. No need to drop the lock and retry the fault.
+	 */
+	if (folio_test_uptodate(folio))
+		vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
 	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
 		goto out_retry;
@@ -3616,6 +3579,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	 */
 	if (fpin) {
 		folio_unlock(folio);
+		if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+			retry_by_vma_lock = true;
 		goto out_retry;
 	}
 	if (mapping_locked)
@@ -3666,7 +3631,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 		filemap_invalidate_unlock_shared(mapping);
 	if (fpin)
 		fput(fpin);
-	return ret | VM_FAULT_RETRY;
+	return ret | VM_FAULT_RETRY | (retry_by_vma_lock ? VM_FAULT_RETRY_VMA : 0);
 }
 EXPORT_SYMBOL(filemap_fault);
 
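The consumer of the new VM_FAULT_RETRY_VMA bit is not shown in these hunks. Presumably it lets the caller tell a retry that was signalled while faulting under the per-VMA lock apart from an ordinary retry; the sketch below is a hypothetical illustration only, and its names and policy are assumptions rather than code from this series.

/* Hypothetical caller-side check, for illustration only. */
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
if (fault & VM_FAULT_RETRY) {
	if (fault & VM_FAULT_RETRY_VMA) {
		/*
		 * Retry was signalled under the per-VMA lock (the file was
		 * pinned and the lock dropped for I/O); the caller may retry
		 * without forcing the mmap_lock fallback.
		 */
	} else {
		/* Ordinary retry: fall back to taking mmap_lock. */
	}
}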