From: Martin Pieuchot Subject: Wired page & faults To: miod@openbsd.org Cc: tech@openbsd.org Date: Sat, 2 Nov 2024 16:38:06 +0100 Previous change disabled looking at neighborhood pages when a fault happened on a wired map entry. We also want to ensure the underlying page doesn't end up on the page queues. Diff below does that, moves code to release swap resources outside of the pageq mutex and shuffles some locking dances to reduce differences with NetBSD. ok? Index: uvm/uvm_fault.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_fault.c,v diff -u -p -r1.137 uvm_fault.c --- uvm/uvm_fault.c 2 Nov 2024 10:31:16 -0000 1.137 +++ uvm/uvm_fault.c 2 Nov 2024 15:06:33 -0000 @@ -1074,9 +1074,14 @@ uvm_fault_upper(struct uvm_faultinfo *uf * ... update the page queues. */ uvm_lock_pageq(); - - if (fault_type == VM_FAULT_WIRE) { + if (flt->wired) { uvm_pagewire(pg); + } else { + uvm_pageactivate(pg); + } + uvm_unlock_pageq(); + + if (flt->wired) { /* * since the now-wired page cannot be paged out, * release its swap resources for others to use. @@ -1085,13 +1090,8 @@ uvm_fault_upper(struct uvm_faultinfo *uf */ atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); uvm_anon_dropswap(anon); - } else { - /* activate it */ - uvm_pageactivate(pg); } - uvm_unlock_pageq(); - /* * done case 1! 
finish up by unlocking everything and returning success */ @@ -1205,7 +1205,7 @@ uvm_fault_lower(struct uvm_faultinfo *uf struct vm_amap *amap = ufi->entry->aref.ar_amap; struct uvm_object *uobj = ufi->entry->object.uvm_obj; boolean_t promote, locked; - int result; + int result, dropswap = 0; struct vm_page *uobjpage, *pg = NULL; struct vm_anon *anon = NULL; voff_t uoff; @@ -1537,10 +1537,9 @@ uvm_fault_lower(struct uvm_faultinfo *uf return ERESTART; } - if (fault_type == VM_FAULT_WIRE) { - uvm_lock_pageq(); + uvm_lock_pageq(); + if (flt->wired) { uvm_pagewire(pg); - uvm_unlock_pageq(); if (pg->pg_flags & PQ_AOBJ) { /* * since the now-wired page cannot be paged out, @@ -1555,14 +1554,15 @@ uvm_fault_lower(struct uvm_faultinfo *uf KASSERT(uobj != NULL); KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock); atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); - uao_dropswap(uobj, pg->offset >> PAGE_SHIFT); + dropswap = 1; } } else { - /* activate it */ - uvm_lock_pageq(); uvm_pageactivate(pg); - uvm_unlock_pageq(); } + uvm_unlock_pageq(); + + if (dropswap) + uao_dropswap(uobj, pg->offset >> PAGE_SHIFT); if (pg->pg_flags & PG_WANTED) wakeup(pg);