From: Mark Kettenis <mark.kettenis@xs4all.nl>
Subject: Re: Access uvmexp.pdfreed only once
To: Martin Pieuchot <mpi@grenadille.net>
Cc: tech@openbsd.org
Date: Mon, 30 Sep 2024 11:24:12 +0200

> Date: Mon, 30 Sep 2024 09:25:33 +0200
> From: Martin Pieuchot <mpi@grenadille.net>
> 
> Return the number of freed pages in uvmpd_scan_inactive() and increment
> the global only once.  This is part of an effort to reduce accesses to
> global counters in the page daemon to ease its unlocking.
> 
> ok?

ok kettenis@

> Index: uvm/uvm_pdaemon.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v
> diff -u -p -r1.114 uvm_pdaemon.c
> --- uvm/uvm_pdaemon.c	1 May 2024 12:54:27 -0000	1.114
> +++ uvm/uvm_pdaemon.c	30 Sep 2024 07:22:38 -0000
> @@ -103,8 +103,8 @@ extern void drmbackoff(long);
>  
>  struct rwlock	*uvmpd_trylockowner(struct vm_page *);
>  void		uvmpd_scan(struct uvm_pmalloc *, struct uvm_constraint_range *);
> -void		uvmpd_scan_inactive(struct uvm_pmalloc *,
> -		    struct uvm_constraint_range *, struct pglist *);
> +int		uvmpd_scan_inactive(struct uvm_pmalloc *,
> +		    struct uvm_constraint_range *);
>  void		uvmpd_tune(void);
>  void		uvmpd_drop(struct pglist *);
>  int		uvmpd_dropswap(struct vm_page *);
> @@ -418,11 +418,12 @@ uvmpd_dropswap(struct vm_page *pg)
>   * => we handle the building of swap-backed clusters
>   * => we return TRUE if we are exiting because we met our target
>   */
> -void
> +int
>  uvmpd_scan_inactive(struct uvm_pmalloc *pma,
> -    struct uvm_constraint_range *constraint, struct pglist *pglst)
> +    struct uvm_constraint_range *constraint)
>  {
> -	int free, result;
> +	struct pglist *pglst = &uvm.page_inactive;
> +	int free, result, freed = 0;
>  	struct vm_page *p, *nextpg;
>  	struct uvm_object *uobj;
>  	struct vm_page *pps[SWCLUSTPAGES], **ppsp;
> @@ -542,7 +543,7 @@ uvmpd_scan_inactive(struct uvm_pmalloc *
>  				/* zap all mappings with pmap_page_protect... */
>  				pmap_page_protect(p, PROT_NONE);
>  				uvm_pagefree(p);
> -				uvmexp.pdfreed++;
> +				freed++;
>  
>  				if (anon) {
>  
> @@ -846,6 +847,8 @@ uvmpd_scan_inactive(struct uvm_pmalloc *
>  			uvm_lock_pageq();
>  		}
>  	}
> +
> +	return freed;
>  }
>  
>  /*
> @@ -890,10 +893,8 @@ uvmpd_scan(struct uvm_pmalloc *pma, stru
>  	 * we work on meeting our inactive target by converting active pages
>  	 * to inactive ones.
>  	 */
> -
> -	pages_freed = uvmexp.pdfreed;
> -	(void) uvmpd_scan_inactive(pma, constraint, &uvm.page_inactive);
> -	pages_freed = uvmexp.pdfreed - pages_freed;
> +	pages_freed = uvmpd_scan_inactive(pma, constraint);
> +	uvmexp.pdfreed += pages_freed;
>  
>  	/*
>  	 * we have done the scan to get free pages.   now we work on meeting
> 
>
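
For context, a minimal standalone sketch of the pattern the diff applies:
accumulate frees in a function-local counter and fold the total into the
shared statistic exactly once at the caller.  The names here
(scan_inactive(), g_pdfreed) are illustrative only, not the real UVM
identifiers:

	#include <stdio.h>

	static int g_pdfreed;		/* stands in for uvmexp.pdfreed */

	/*
	 * Before the diff, the scan bumped the global once per freed
	 * page.  After it, the scan touches no globals: it counts
	 * locally and returns the total.
	 */
	static int
	scan_inactive(int npages)
	{
		int freed = 0;

		for (int i = 0; i < npages; i++) {
			/* ... decide whether this page can be freed ... */
			freed++;
		}
		return freed;
	}

	int
	main(void)
	{
		int pages_freed;

		pages_freed = scan_inactive(8);
		g_pdfreed += pages_freed;	/* single write to the global */
		printf("freed %d (total %d)\n", pages_freed, g_pdfreed);
		return 0;
	}

With the global written once per scan instead of once per freed page, the
counter no longer has to be reachable from the middle of the scan loop,
which is what the quoted mail means by reducing accesses to global counters
to ease unlocking the page daemon.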