Index | Thread | Search

From:
Mark Kettenis <mark.kettenis@xs4all.nl>
Subject:
Re: Deactivating pages & PROT_NONE
To:
Martin Pieuchot <mpi@grenadille.net>
Cc:
tech@openbsd.org
Date:
Wed, 25 Dec 2024 13:11:09 +0100

Download raw body.

Thread
> Date: Tue, 24 Dec 2024 10:51:19 +0100
> From: Martin Pieuchot <mpi@grenadille.net>
> 
> When the kernel decides to move a page to the inactive list, with
> uvm_pagedeactivate(), it also removes its pmap references to ensure
> the next access will generate a fault.
> 
> Diff below moves the pmap_page_protect(9) call inside uvm_pagedeactivate().
> 
> This is a baby step towards reducing the contention on the `pageqlock'.

Hah, it means that you actually do a bit more work while holding the
lock in one case.  But all the other calls to pmap_page_protect()
already happen with the lock held.  And doing it this way is safer.

> I'm aware that many more improvements are possible.  I'm trying to move
> very slowly to not introduce too many changes in behavior and expose
> existing bugs.  That's why I'm keeping the existing behavior of always
> calling pmap_page_protect(9) inside uvm_pagedeactivate().
> 
> Note that currently, one call to uvm_pagedeactivate() is not coupled with
> a pmap_page_protect(), the diff below fixes that/makes it coherent.
> 
> ok?

ok kettenis@

> Index: uvm/uvm_anon.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_anon.c,v
> diff -u -p -r1.60 uvm_anon.c
> --- uvm/uvm_anon.c	24 Dec 2024 08:39:30 -0000	1.60
> +++ uvm/uvm_anon.c	24 Dec 2024 09:40:07 -0000
> @@ -202,7 +202,6 @@ uvm_anon_pagein(struct vm_amap *amap, st
>  	/*
>  	 * Deactivate the page (to put it on a page queue).
>  	 */
> -	pmap_page_protect(pg, PROT_NONE);
>  	uvm_lock_pageq();
>  	uvm_pagedeactivate(pg);
>  	uvm_unlock_pageq();
> Index: uvm/uvm_aobj.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v
> diff -u -p -r1.114 uvm_aobj.c
> --- uvm/uvm_aobj.c	24 Dec 2024 08:39:30 -0000	1.114
> +++ uvm/uvm_aobj.c	24 Dec 2024 09:40:08 -0000
> @@ -930,7 +930,6 @@ uao_flush(struct uvm_object *uobj, voff_
>  				continue;
>  
>  			uvm_lock_pageq();
> -			pmap_page_protect(pg, PROT_NONE);
>  			uvm_pagedeactivate(pg);
>  			uvm_unlock_pageq();
>  
> Index: uvm/uvm_fault.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
> diff -u -p -r1.157 uvm_fault.c
> --- uvm/uvm_fault.c	24 Dec 2024 08:55:48 -0000	1.157
> +++ uvm/uvm_fault.c	24 Dec 2024 09:40:08 -0000
> @@ -185,7 +185,6 @@ uvmfault_anonflush(struct vm_anon **anon
>  		if (pg && (pg->pg_flags & PG_BUSY) == 0) {
>  			uvm_lock_pageq();
>  			if (pg->wire_count == 0) {
> -				pmap_page_protect(pg, PROT_NONE);
>  				uvm_pagedeactivate(pg);
>  			}
>  			uvm_unlock_pageq();
> Index: uvm/uvm_map.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> diff -u -p -r1.336 uvm_map.c
> --- uvm/uvm_map.c	11 Dec 2024 02:00:32 -0000	1.336
> +++ uvm/uvm_map.c	24 Dec 2024 09:40:09 -0000
> @@ -4524,11 +4524,6 @@ deactivate_it:
>  				uvm_lock_pageq();
>  
>  				KASSERT(pg->uanon == anon);
> -
> -				/* zap all mappings for the page. */
> -				pmap_page_protect(pg, PROT_NONE);
> -
> -				/* ...and deactivate the page. */
>  				uvm_pagedeactivate(pg);
>  
>  				uvm_unlock_pageq();
> Index: uvm/uvm_page.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_page.c,v
> diff -u -p -r1.179 uvm_page.c
> --- uvm/uvm_page.c	20 Dec 2024 18:54:12 -0000	1.179
> +++ uvm/uvm_page.c	24 Dec 2024 09:40:09 -0000
> @@ -1256,7 +1256,7 @@ uvm_pageunwire(struct vm_page *pg)
>  }
>  
>  /*
> - * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
> + * uvm_pagedeactivate: deactivate page.
>   *
>   * => caller must lock page queues
>   * => caller must check to make sure page is not wired
> @@ -1267,6 +1267,8 @@ uvm_pagedeactivate(struct vm_page *pg)
>  {
>  	KASSERT(uvm_page_owner_locked_p(pg, FALSE));
>  	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
> +
> +	pmap_page_protect(pg, PROT_NONE);
>  
>  	if (pg->pg_flags & PQ_ACTIVE) {
>  		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
> Index: uvm/uvm_pdaemon.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v
> diff -u -p -r1.132 uvm_pdaemon.c
> --- uvm/uvm_pdaemon.c	25 Nov 2024 13:37:49 -0000	1.132
> +++ uvm/uvm_pdaemon.c	24 Dec 2024 09:40:09 -0000
> @@ -1020,7 +1020,6 @@ uvmpd_scan_active(struct uvm_pmalloc *pm
>  		 * inactive pages.
>  		 */
>  		if (inactive_shortage > 0) {
> -			pmap_page_protect(p, PROT_NONE);
>  			/* no need to check wire_count as pg is "active" */
>  			uvm_pagedeactivate(p);
>  			uvmexp.pddeact++;
> Index: uvm/uvm_vnode.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v
> diff -u -p -r1.137 uvm_vnode.c
> --- uvm/uvm_vnode.c	20 Dec 2024 18:49:37 -0000	1.137
> +++ uvm/uvm_vnode.c	24 Dec 2024 09:40:09 -0000
> @@ -683,7 +683,6 @@ uvn_flush(struct uvm_object *uobj, voff_
>  		if (!needs_clean) {
>  			if (flags & PGO_DEACTIVATE) {
>  				if (pp->wire_count == 0) {
> -					pmap_page_protect(pp, PROT_NONE);
>  					uvm_pagedeactivate(pp);
>  				}
>  			} else if (flags & PGO_FREE) {
> @@ -809,7 +808,6 @@ ReTry:
>  			/* dispose of page */
>  			if (flags & PGO_DEACTIVATE) {
>  				if (ptmp->wire_count == 0) {
> -					pmap_page_protect(ptmp, PROT_NONE);
>  					uvm_pagedeactivate(ptmp);
>  				}
>  			} else if (flags & PGO_FREE &&
> 
> 
> 
>