Index | Thread | Search

From:
"Martin Pieuchot" <mpi@grenadille.net>
Subject:
Re: Improve uvm_pageout() logic for segmented memory space
To:
tech@openbsd.org
Date:
Thu, 07 Nov 2024 10:11:13 +0100

Download raw body.

Thread
  • Martin Pieuchot:

    Improve uvm_pageout() logic for segmented memory space

On 06/11/24(Wed) 15:58, mpi@grenadille.net wrote:
> Diff below greatly improves the responsiveness of the page daemon for
> 64bit archs with a low/high memory split.  The improvement comes from
> a more precise calculation of how many low pages have to be freed.  As
> a result the amount of pages written to swap is decreased by ~50% in my
> tests and my arm64 machine becomes responsive during heavy swapping.
> 
> The diff includes:
> 
> - Use a global "struct uvm_pmalloc" to notify failed nowait allocations
>   in order to look at the managed lists.  The current algorithm does not
>   call uvmpd_scan() if there have been only nowait allocations.
> 
> - Skip calling the shrinkers and grabbing some locks if the page daemon
>   is awoken to rebalance the active/inactive lists.
> 
> - Do not bother releasing high pages if all we are interested in are low
>   pages.
> 
> - Try to deactivate low pages first only if we are not short on swap
>   slots.

Third one.

Remove redundant `constraint' argument to uvmpd_scan() & friends.

---
 sys/uvm/uvm_pdaemon.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 1070fd8d82b..8af0ff181a6 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -102,10 +102,8 @@ extern unsigned long drmbackoff(long);
  */
 
 struct rwlock	*uvmpd_trylockowner(struct vm_page *);
-void		uvmpd_scan(struct uvm_pmalloc *, int, int,
-		    struct uvm_constraint_range *);
-int		uvmpd_scan_inactive(struct uvm_pmalloc *, int,
-		    struct uvm_constraint_range *);
+void		uvmpd_scan(struct uvm_pmalloc *, int, int);
+int		uvmpd_scan_inactive(struct uvm_pmalloc *, int);
 void		uvmpd_tune(void);
 void		uvmpd_drop(struct pglist *);
 int		uvmpd_dropswap(struct vm_page *);
@@ -283,8 +281,7 @@ uvm_pageout(void *arg)
 		uvm_lock_pageq();
 		if (!uvmpd_pma_done(pma) ||
 		    (shortage > 0) || (inactive_shortage > 0)) {
-			uvmpd_scan(pma, shortage, inactive_shortage,
-			    &constraint);
+			uvmpd_scan(pma, shortage, inactive_shortage);
 		}
 
 		/*
@@ -449,8 +446,7 @@ uvmpd_match_constraint(struct vm_page *p,
  * => we return TRUE if we are exiting because we met our target
  */
 int
-uvmpd_scan_inactive(struct uvm_pmalloc *pma, int shortage,
-    struct uvm_constraint_range *constraint)
+uvmpd_scan_inactive(struct uvm_pmalloc *pma, int shortage)
 {
 	struct pglist *pglst = &uvm.page_inactive;
 	int result, freed = 0;
@@ -477,9 +473,14 @@ uvmpd_scan_inactive(struct uvm_pmalloc *pma, int shortage,
 	dirtyreacts = 0;
 	p = NULL;
 
-	/* Start with the first page on the list that fit in `constraint' */
+	/*
+	 * If a thread is waiting for us to release memory from a specific
+	 * memory range start with the first page on the list that fits in
+	 * it.
+	 */
 	TAILQ_FOREACH(p, pglst, pageq) {
-		if (uvmpd_match_constraint(p, constraint))
+		if (uvmpd_pma_done(pma) ||
+		    uvmpd_match_constraint(p, &pma->pm_constraint))
 			break;
 	}
 
@@ -882,8 +883,7 @@ uvmpd_scan_inactive(struct uvm_pmalloc *pma, int shortage,
  */
 
 void
-uvmpd_scan(struct uvm_pmalloc *pma, int shortage, int inactive_shortage,
-    struct uvm_constraint_range *constraint)
+uvmpd_scan(struct uvm_pmalloc *pma, int shortage, int inactive_shortage)
 {
 	int swap_shortage, pages_freed;
 	struct vm_page *p, *nextpg;
@@ -913,7 +913,7 @@ uvmpd_scan(struct uvm_pmalloc *pma, int shortage, int inactive_shortage,
 	 * we work on meeting our inactive target by converting active pages
 	 * to inactive ones.
 	 */
-	pages_freed = uvmpd_scan_inactive(pma, shortage, constraint);
+	pages_freed = uvmpd_scan_inactive(pma, shortage);
 	uvmexp.pdfreed += pages_freed;
 	shortage -= pages_freed;
 
@@ -941,7 +941,8 @@ uvmpd_scan(struct uvm_pmalloc *pma, int shortage, int inactive_shortage,
 		/*
 		 * skip this page if it doesn't match the constraint.
 		 */
-		if (!uvmpd_match_constraint(p, &pma->pm_constraint))
+		if (!uvmpd_pma_done(pma) &&
+		    !uvmpd_match_constraint(p, &pma->pm_constraint))
 			continue;
 
 		/*
-- 
2.46.1