Improve uvm_pageout() logic for segmented memory space

On 06/11/24(Wed) 15:58, mpi@grenadille.net wrote:
> Diff below greatly improves the responsiveness of the page daemon for
> 64-bit archs with a low/high memory split. The improvement comes from
> a more precise calculation of how many low pages have to be freed. As
> a result the number of pages written to swap is decreased by ~50% in my
> tests and my arm64 machine becomes responsive during heavy swapping.
>
> The diff includes:
>
> - Use a global "struct uvm_pmalloc" to notify the page daemon of failed
> nowait allocations, so that it looks at the managed lists. The current
> algorithm does not call uvmpd_scan() if there have been only nowait
> allocations.
>
> - Skip calling the shrinkers and grabbing some locks if the page daemon
> is awoken to rebalance the active/inactive lists.
>
> - Do not bother releasing high pages if all we are interested in are low
> pages.
>
> - Try to deactivate low pages first only if we are not short on swap
> slots.
4th diff:

Use the same mechanism to notify the page daemon of failed nowait & wait
allocations. As a side effect, the page daemon now considers releasing
inactive pages when a nowait allocation for low pages fails. A small
sketch of the idea is below; the actual diff follows it.
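To make the change easier to follow, here is a minimal stand-alone sketch
(user-space C, not the kernel code) of the mechanism: a failed nowait
allocation links one static "nowait_pma" request onto the same list the
page daemon already walks for sleeping allocators. The struct fields and
the UVM_PMA_LINKED/UVM_PMA_BUSY flags mirror the diff; the list head name
"allocs", the 4 KB page size, main() and the printf output are placeholders
for illustration only.

#include <sys/queue.h>
#include <stdio.h>

#define UVM_PMA_LINKED	0x01	/* request is linked on the allocs list */
#define UVM_PMA_BUSY	0x02	/* page daemon is working on the request */

struct uvm_pmalloc {
	int	pm_flags;
	size_t	pm_size;			/* how much memory is wanted */
	TAILQ_ENTRY(uvm_pmalloc) pmq;		/* list glue */
};

static TAILQ_HEAD(, uvm_pmalloc) allocs = TAILQ_HEAD_INITIALIZER(allocs);

/* One shared request standing in for every failed nowait allocation. */
static struct uvm_pmalloc nowait_pma = { .pm_size = 16 * 4096 /* XXX */ };

/* Called when a nowait allocation fails: link the shared request once. */
static void
nowait_failed(void)
{
	if (!(nowait_pma.pm_flags & UVM_PMA_LINKED)) {
		nowait_pma.pm_flags = UVM_PMA_LINKED;
		TAILQ_INSERT_TAIL(&allocs, &nowait_pma, pmq);
		/* wakeup(&uvm.pagedaemon) in the kernel */
	}
}

/* One page daemon round: wait and nowait failures now look the same. */
static void
pagedaemon_round(void)
{
	struct uvm_pmalloc *pma = TAILQ_FIRST(&allocs);

	if (pma == NULL) {
		printf("nothing pending: only rebalance active/inactive\n");
		return;
	}
	pma->pm_flags |= UVM_PMA_BUSY;
	printf("recover %zu bytes for the pending request\n", pma->pm_size);
	pma->pm_flags &= ~(UVM_PMA_BUSY | UVM_PMA_LINKED);
	TAILQ_REMOVE(&allocs, pma, pmq);
}

int
main(void)
{
	pagedaemon_round();	/* nothing pending */
	nowait_failed();
	nowait_failed();	/* second failure is a no-op: already linked */
	pagedaemon_round();	/* the daemon sees a single queued request */
	return 0;
}

The point of sharing the queue is that the daemon no longer needs the
separate uvm_nowait_failed flag: it picks its constraint and size from the
pending request, so a failed nowait allocation is treated like any other
pending allocation, including the inactive-page scan.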
---
sys/uvm/uvm_pdaemon.c | 25 ++++++++++++-------------
sys/uvm/uvm_pmemrange.c | 11 +++++++----
2 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 8af0ff181a6..614209393e2 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -194,7 +194,7 @@ uvmpd_tune(void)
* recover at least some memory in the most restricted region (assumed
* to be dma_constraint).
*/
-volatile int uvm_nowait_failed;
+struct uvm_pmalloc nowait_pma;
static inline int
uvmpd_pma_done(struct uvm_pmalloc *pma)
@@ -219,11 +219,19 @@ uvm_pageout(void *arg)
(void) spl0();
uvmpd_tune();
+ /*
+ * XXX realistically, this is what our nowait callers probably
+ * care about.
+ */
+ nowait_pma.pm_constraint = dma_constraint;
+ nowait_pma.pm_size = (16 << PAGE_SHIFT); /* XXX */
+ nowait_pma.pm_flags = 0;
+
for (;;) {
long size;
uvm_lock_fpageq();
- if (!uvm_nowait_failed && TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
+ if (TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
msleep_nsec(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
"pgdaemon", INFSLP);
uvmexp.pdwoke++;
@@ -233,15 +241,7 @@ uvm_pageout(void *arg)
pma->pm_flags |= UVM_PMA_BUSY;
constraint = pma->pm_constraint;
} else {
- if (uvm_nowait_failed) {
- /*
- * XXX realistically, this is what our
- * nowait callers probably care about
- */
- constraint = dma_constraint;
- uvm_nowait_failed = 0;
- } else
- constraint = no_constraint;
+ constraint = no_constraint;
}
/* How many pages do we need to free during this round? */
shortage = uvmexp.freetarg - uvmexp.free + BUFPAGES_DEFICIT;
@@ -303,8 +303,7 @@ uvm_pageout(void *arg)
pma->pm_flags &= ~UVM_PMA_BUSY;
if (pma->pm_flags & UVM_PMA_FREED) {
pma->pm_flags &= ~UVM_PMA_LINKED;
- TAILQ_REMOVE(&uvm.pmr_control.allocs, pma,
- pmq);
+ TAILQ_REMOVE(&uvm.pmr_control.allocs, pma, pmq);
wakeup(pma);
}
}
diff --git a/sys/uvm/uvm_pmemrange.c b/sys/uvm/uvm_pmemrange.c
index 287308db735..207f2d31a79 100644
--- a/sys/uvm/uvm_pmemrange.c
+++ b/sys/uvm/uvm_pmemrange.c
@@ -841,7 +841,7 @@ uvm_pmr_extract_range(struct uvm_pmemrange *pmr, struct vm_page *pg,
* recover at least some memory in the most restricted region (assumed
* to be dma_constraint).
*/
-extern volatile int uvm_nowait_failed;
+extern struct uvm_pmalloc nowait_pma;
/*
* Acquire a number of pages.
@@ -1190,9 +1190,12 @@ fail:
flags & UVM_PLA_FAILOK) == 0)
goto retry;
KASSERT(flags & UVM_PLA_FAILOK);
- } else {
- if (!(flags & UVM_PLA_NOWAKE)) {
- uvm_nowait_failed = 1;
+ } else if (!(flags & UVM_PLA_NOWAKE)) {
+ struct uvm_pmalloc *pma = &nowait_pma;
+
+ if (!(nowait_pma.pm_flags & UVM_PMA_LINKED)) {
+ nowait_pma.pm_flags = UVM_PMA_LINKED;
+ TAILQ_INSERT_TAIL(&uvm.pmr_control.allocs, pma, pmq);
wakeup(&uvm.pagedaemon);
}
}
--
2.46.1