From: Martin Pieuchot <mpi@grenadille.net>
Subject: Relax uvm_page_owner_locked for shared locks
To: tech@openbsd.org
Date: Wed, 18 Dec 2024 17:59:21 +0100

Diff below relaxes the checks to accept a shared (read) lock in:

. uvm_pagewait():

	Sleeping & releasing the lock doesn't require an exclusive one.

. uvm_pageactivate() & uvm_pagedeactivate():

	The lists are protected by the global `pageqlock' mutex.  (Both
	caller patterns are sketched below.)
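
Hypothetical caller sketches (the helper names below are made up for
illustration and are not part of this diff), assuming `pg' belongs to a
regular uvm_object, showing the two patterns the relaxed assertions are
meant to allow:

	#include <sys/param.h>
	#include <sys/rwlock.h>
	#include <uvm/uvm.h>

	/*
	 * Sketch: wait for a busy page while holding the owner's
	 * vmobjlock shared.  uvm_pagewait() only sleeps and drops the
	 * lock (PNORELOCK), so an exclusive lock isn't needed.
	 */
	static void
	wait_for_busy_page(struct vm_page *pg)
	{
		struct rwlock *lock = pg->uobject->vmobjlock;

		rw_enter(lock, RW_READ);
		if (pg->pg_flags & PG_BUSY) {
			uvm_pagewait(pg, lock, "pgwait");
			return;		/* PNORELOCK: lock already dropped */
		}
		rw_exit(lock);
	}

	/*
	 * Sketch: move a page to the inactive queue while holding the
	 * owner lock shared.  The active/inactive lists themselves are
	 * serialized by the global pageqlock mutex.
	 */
	static void
	deactivate_page(struct vm_page *pg)
	{
		struct rwlock *lock = pg->uobject->vmobjlock;

		rw_enter(lock, RW_READ);
		uvm_lock_pageq();
		uvm_pagedeactivate(pg);
		uvm_unlock_pageq();
		rw_exit(lock);
	}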

ok?

Index: uvm/uvm_page.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
diff -u -p -r1.178 uvm_page.c
--- uvm/uvm_page.c	26 Nov 2024 09:51:30 -0000	1.178
+++ uvm/uvm_page.c	18 Dec 2024 16:44:27 -0000
@@ -118,7 +118,7 @@ static vaddr_t      virtual_space_end;
  */
 static void uvm_pageinsert(struct vm_page *);
 static void uvm_pageremove(struct vm_page *);
-int uvm_page_owner_locked_p(struct vm_page *);
+int uvm_page_owner_locked_p(struct vm_page *, boolean_t);
 
 /*
  * inline functions
@@ -700,7 +700,7 @@ uvm_pagealloc_pg(struct vm_page *pg, str
 	pg->offset = off;
 	pg->uobject = obj;
 	pg->uanon = anon;
-	KASSERT(uvm_page_owner_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg, TRUE));
 	if (anon) {
 		anon->an_page = pg;
 		flags |= PQ_ANON;
@@ -1040,7 +1040,7 @@ uvm_page_unbusy(struct vm_page **pgs, in
 			continue;
 		}
 
-		KASSERT(uvm_page_owner_locked_p(pg));
+		KASSERT(uvm_page_owner_locked_p(pg, TRUE));
 		KASSERT(pg->pg_flags & PG_BUSY);
 
 		if (pg->pg_flags & PG_WANTED) {
@@ -1072,6 +1072,7 @@ uvm_pagewait(struct vm_page *pg, struct 
 {
 	KASSERT(rw_lock_held(lock));
 	KASSERT((pg->pg_flags & PG_BUSY) != 0);
+	KASSERT(uvm_page_owner_locked_p(pg, FALSE));
 
 	atomic_setbits_int(&pg->pg_flags, PG_WANTED);
 	rwsleep_nsec(pg, lock, PVM | PNORELOCK, wmesg, INFSLP);
@@ -1225,7 +1226,7 @@ uvm_pagelookup(struct uvm_object *obj, v
 void
 uvm_pagewire(struct vm_page *pg)
 {
-	KASSERT(uvm_page_owner_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg, TRUE));
 	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
 
 	if (pg->wire_count == 0) {
@@ -1244,7 +1245,7 @@ uvm_pagewire(struct vm_page *pg)
 void
 uvm_pageunwire(struct vm_page *pg)
 {
-	KASSERT(uvm_page_owner_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg, TRUE));
 	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
 
 	pg->wire_count--;
@@ -1264,7 +1265,7 @@ uvm_pageunwire(struct vm_page *pg)
 void
 uvm_pagedeactivate(struct vm_page *pg)
 {
-	KASSERT(uvm_page_owner_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg, FALSE));
 	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
 
 	if (pg->pg_flags & PQ_ACTIVE) {
@@ -1298,7 +1299,7 @@ uvm_pagedeactivate(struct vm_page *pg)
 void
 uvm_pageactivate(struct vm_page *pg)
 {
-	KASSERT(uvm_page_owner_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg, FALSE));
 	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
 
 	uvm_pagedequeue(pg);
@@ -1352,12 +1353,14 @@ uvm_pagecopy(struct vm_page *src, struct
  * locked.  this is a weak check for runtime assertions only.
  */
 int
-uvm_page_owner_locked_p(struct vm_page *pg)
+uvm_page_owner_locked_p(struct vm_page *pg, boolean_t exclusive)
 {
 	if (pg->uobject != NULL) {
 		if (UVM_OBJ_IS_DUMMY(pg->uobject))
 			return 1;
-		return rw_write_held(pg->uobject->vmobjlock);
+		return exclusive
+		    ? rw_write_held(pg->uobject->vmobjlock)
+		    : rw_lock_held(pg->uobject->vmobjlock);
 	}
 	if (pg->uanon != NULL) {
 		return rw_write_held(pg->uanon->an_lock);