Introduce uvmfault_promote()
The diff below introduces a new function that merges the common code used
to promote data to a new anon.
It is used for 3 different cases:
- CoW of an anon, `uobjpage' is set to oanon->an_page
- Zero fill of a new page, `uobjpage' is set to PGO_DONTCARE
- CoW of a uobj page, `uobjpage' is set to the center page fetched by pgo_get()
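
For illustration only, here is a minimal sketch (not the patch itself) of
how the `uobjpage' argument selects the fill strategy inside
uvmfault_promote(); locking, error handling and counters are left out:

	anon = uvm_analloc();
	if (uobjpage == PGO_DONTCARE) {
		/* zero fill: ask uvm_pagealloc() for a zeroed page */
		pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO);
	} else {
		/* CoW: allocate a blank page, then copy the source into it */
		pg = uvm_pagealloc(NULL, 0, anon, 0);
		uvm_pagecopy(uobjpage, pg);
	}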
This diff doesn't introduce any change in behavior. More merging will
come afterwards.
As usual, this change reduces the diff with NetBSD.
ok?
Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
diff -u -p -r1.148 uvm_fault.c
--- uvm/uvm_fault.c 29 Nov 2024 06:44:57 -0000 1.148
+++ uvm/uvm_fault.c 30 Nov 2024 11:53:57 -0000
@@ -487,6 +487,68 @@ uvmfault_anonget(struct uvm_faultinfo *u
}
/*
+ * uvmfault_promote: promote data to a new anon. used for 1B and 2B.
+ *
+ * 1. allocate an anon and a page.
+ * 2. fill its contents.
+ *
+ * => if we fail (result != 0) we unlock everything.
+ * => on success, return a new locked anon via 'nanon'.
+ * => it's the caller's responsibility to put the promoted nanon->an_page
+ *    on the page queue.
+ */
+int
+uvmfault_promote(struct uvm_faultinfo *ufi,
+ struct vm_page *uobjpage,
+ struct vm_anon **nanon, /* OUT: allocated anon */
+ struct vm_page **npg)
+{
+ struct vm_amap *amap = ufi->entry->aref.ar_amap;
+ struct vm_anon *anon;
+ struct vm_page *pg = NULL;
+
+ anon = uvm_analloc();
+ if (anon) {
+ anon->an_lock = amap->am_lock;
+ pg = uvm_pagealloc(NULL, 0, anon,
+ (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
+ }
+
+ /* check for out of RAM */
+ if (anon == NULL || pg == NULL) {
+ uvmfault_unlockall(ufi, amap, NULL);
+ if (anon == NULL)
+ counters_inc(uvmexp_counters, flt_noanon);
+ else {
+ anon->an_lock = NULL;
+ anon->an_ref--;
+ uvm_anfree(anon);
+ counters_inc(uvmexp_counters, flt_noram);
+ }
+
+ if (uvm_swapisfull())
+ return ENOMEM;
+
+ /* out of RAM, wait for more */
+ if (anon == NULL)
+ uvm_anwait();
+ else
+ uvm_wait("flt_noram3");
+ return ERESTART;
+ }
+
+ /*
+ * copy the page [pg now dirty]
+ */
+ if (uobjpage != PGO_DONTCARE)
+ uvm_pagecopy(uobjpage, pg);
+
+ *nanon = anon;
+ *npg = pg;
+ return 0;
+}
+
+/*
* Update statistics after fault resolution.
* - maxrss
*/
@@ -978,37 +1040,11 @@ uvm_fault_upper(struct uvm_faultinfo *uf
if ((flt->access_type & PROT_WRITE) != 0 && anon->an_ref > 1) {
counters_inc(uvmexp_counters, flt_acow);
oanon = anon; /* oanon = old */
- anon = uvm_analloc();
- if (anon) {
- anon->an_lock = amap->am_lock;
- pg = uvm_pagealloc(NULL, 0, anon, 0);
- }
-
- /* check for out of RAM */
- if (anon == NULL || pg == NULL) {
- uvmfault_unlockall(ufi, amap, NULL);
- if (anon == NULL)
- counters_inc(uvmexp_counters, flt_noanon);
- else {
- anon->an_lock = NULL;
- anon->an_ref--;
- uvm_anfree(anon);
- counters_inc(uvmexp_counters, flt_noram);
- }
- if (uvm_swapisfull())
- return ENOMEM;
-
- /* out of RAM, wait for more */
- if (anon == NULL)
- uvm_anwait();
- else
- uvm_wait("flt_noram3");
- return ERESTART;
- }
+ error = uvmfault_promote(ufi, oanon->an_page, &anon, &pg);
+ if (error)
+ return error;
- /* got all resources, replace anon with nanon */
- uvm_pagecopy(oanon->an_page, pg); /* pg now !PG_CLEAN */
/* un-busy! new page */
atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
@@ -1316,23 +1352,8 @@ uvm_fault_lower(struct uvm_faultinfo *uf
panic("uvm_fault: want to promote data, but no anon");
#endif
- anon = uvm_analloc();
- if (anon) {
- /*
- * In `Fill in data...' below, if
- * uobjpage == PGO_DONTCARE, we want
- * a zero'd, dirty page, so have
- * uvm_pagealloc() do that for us.
- */
- anon->an_lock = amap->am_lock;
- pg = uvm_pagealloc(NULL, 0, anon,
- (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
- }
-
- /*
- * out of memory resources?
- */
- if (anon == NULL || pg == NULL) {
+ error = uvmfault_promote(ufi, uobjpage, &anon, &pg);
+ if (error) {
/*
* arg! must unbusy our page and fail or sleep.
*/
@@ -1347,27 +1368,7 @@ uvm_fault_lower(struct uvm_faultinfo *uf
PG_BUSY|PG_WANTED);
UVM_PAGE_OWN(uobjpage, NULL);
}
-
- /* unlock and fail ... */
- uvmfault_unlockall(ufi, amap, uobj);
- if (anon == NULL)
- counters_inc(uvmexp_counters, flt_noanon);
- else {
- anon->an_lock = NULL;
- anon->an_ref--;
- uvm_anfree(anon);
- counters_inc(uvmexp_counters, flt_noram);
- }
-
- if (uvm_swapisfull())
- return (ENOMEM);
-
- /* out of RAM, wait for more */
- if (anon == NULL)
- uvm_anwait();
- else
- uvm_wait("flt_noram5");
- return ERESTART;
+ return error;
}
/*
@@ -1375,8 +1376,6 @@ uvm_fault_lower(struct uvm_faultinfo *uf
*/
if (uobjpage != PGO_DONTCARE) {
counters_inc(uvmexp_counters, flt_prcopy);
- /* copy page [pg now dirty] */
- uvm_pagecopy(uobjpage, pg);
/*
* promote to shared amap? make sure all sharing