
From: Martin Pieuchot <mpi@grenadille.net>
Subject: amap_lock() & shared option
To: tech@openbsd.org
Date: Tue, 3 Dec 2024 09:00:28 +0100

The diff below changes the amap_lock() macro to take an argument that
allows switching between shared and exclusive locks.  For the moment,
all callers pass RW_WRITE.
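
For illustration, a future read-only path could then take the lock
shared.  This is only a sketch, not part of the diff; it assumes
"entry" and "va" are in scope, as in the fault path:

	amap_lock(amap, RW_READ);	/* shared: lookup only, no amap update */
	anon = amap_lookup(&entry->aref, va - entry->start);
	amap_unlock(amap);

Since amap_unlock() now maps to rw_exit(), which releases both shared
and exclusive locks, unlock call sites need no change.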

ok?

Index: uvm/uvm_amap.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_amap.c,v
diff -u -p -r1.95 uvm_amap.c
--- uvm/uvm_amap.c	20 May 2024 17:03:36 -0000	1.95
+++ uvm/uvm_amap.c	2 Dec 2024 17:07:09 -0000
@@ -621,7 +621,7 @@ amap_copy(struct vm_map *map, struct vm_
 	amap->am_lock = srcamap->am_lock;
 	rw_obj_hold(amap->am_lock);
 
-	amap_lock(srcamap);
+	amap_lock(srcamap, RW_WRITE);
 
 	/*
 	 * Re-check the reference count with the lock held.  If it has
@@ -749,7 +749,7 @@ amap_cow_now(struct vm_map *map, struct 
 	 * am_anon[] array on us while the lock is dropped.
 	 */
 ReStart:
-	amap_lock(amap);
+	amap_lock(amap, RW_WRITE);
 	AMAP_CHUNK_FOREACH(chunk, amap) {
 		int i, map = chunk->ac_usedmap;
 
@@ -849,7 +849,7 @@ amap_splitref(struct vm_aref *origref, s
 	if (leftslots == 0)
 		panic("amap_splitref: split at zero offset");
 
-	amap_lock(amap);
+	amap_lock(amap, RW_WRITE);
 
 	if (amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
 		panic("amap_splitref: map size check failed");
@@ -1088,7 +1088,7 @@ amap_swap_off(int startslot, int endslot
 		int i, map;
 		struct vm_amap_chunk *chunk;
 
-		amap_lock(am);
+		amap_lock(am, RW_WRITE);
 		if (am->am_nused == 0) {
 			amap_unlock(am);
 			am_next = LIST_NEXT(am, am_list);
@@ -1118,7 +1118,7 @@ again:
 				am->am_flags |= AMAP_SWAPOFF;
 
 				rv = uvm_anon_pagein(am, anon);
-				amap_lock(am);
+				amap_lock(am, RW_WRITE);
 
 				am->am_flags &= ~AMAP_SWAPOFF;
 				if (amap_refs(am) == 0) {
@@ -1339,7 +1339,7 @@ amap_adjref_anons(struct vm_amap *amap, 
 void
 amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
 {
-	amap_lock(amap);
+	amap_lock(amap, RW_WRITE);
 	if (flags & AMAP_SHARED)
 		amap->am_flags |= AMAP_SHARED;
 	amap_adjref_anons(amap, offset, len, 1, (flags & AMAP_REFALL) != 0);
@@ -1355,7 +1355,7 @@ amap_ref(struct vm_amap *amap, vaddr_t o
 void
 amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
 {
-	amap_lock(amap);
+	amap_lock(amap, RW_WRITE);
 
 	KASSERT(amap->am_ref > 0);
 
Index: uvm/uvm_amap.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_amap.h,v
diff -u -p -r1.34 uvm_amap.h
--- uvm/uvm_amap.h	15 Nov 2024 02:54:01 -0000	1.34
+++ uvm/uvm_amap.h	2 Dec 2024 17:07:09 -0000
@@ -262,8 +262,8 @@ struct vm_amap {
 #define amap_flags(AMAP)	((AMAP)->am_flags)
 #define amap_refs(AMAP)		((AMAP)->am_ref)
 
-#define amap_lock(AMAP)		rw_enter_write((AMAP)->am_lock)
-#define amap_unlock(AMAP)	rw_exit_write((AMAP)->am_lock)
+#define amap_lock(AMAP, RWLT)	rw_enter((AMAP)->am_lock, (RWLT))
+#define amap_unlock(AMAP)	rw_exit((AMAP)->am_lock)
 
 #endif /* _KERNEL */
 
Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
diff -u -p -r1.148 uvm_fault.c
--- uvm/uvm_fault.c	29 Nov 2024 06:44:57 -0000	1.148
+++ uvm/uvm_fault.c	2 Dec 2024 17:07:54 -0000
@@ -784,7 +784,7 @@ uvm_fault_check(struct uvm_faultinfo *uf
 	 * if we've got an amap then lock it and extract current anons.
 	 */
 	if (amap) {
-		amap_lock(amap);
+		amap_lock(amap, RW_WRITE);
 		amap_lookups(&ufi->entry->aref,
 		    flt->startva - ufi->entry->start, *ranons, flt->npages);
 	} else {
@@ -976,6 +976,9 @@ uvm_fault_upper(struct uvm_faultinfo *uf
 	 */
 
 	if ((flt->access_type & PROT_WRITE) != 0 && anon->an_ref > 1) {
+		/* promoting requires a write lock. */
+		KASSERT(rw_write_held(amap->am_lock));
+
 		counters_inc(uvmexp_counters, flt_acow);
 		oanon = anon;		/* oanon = old */
 		anon = uvm_analloc();
@@ -1016,7 +1019,10 @@ uvm_fault_upper(struct uvm_faultinfo *uf
 		    ufi->orig_rvaddr - ufi->entry->start, anon, 1);
 		KASSERT(ret == 0);
 
+		KASSERT(anon->an_lock == oanon->an_lock);
+
 		/* deref: can not drop to zero here by defn! */
+		KASSERT(oanon->an_ref > 1);
 		oanon->an_ref--;
 
 #if defined(MULTIPROCESSOR) && !defined(__HAVE_PMAP_MPSAFE_ENTER_COW)
@@ -1563,7 +1569,7 @@ uvm_fault_lower_io(
 	/* re-verify the state of the world.  */
 	locked = uvmfault_relock(ufi);
 	if (locked && amap != NULL)
-		amap_lock(amap);
+		amap_lock(amap, RW_WRITE);
 
 	/* might be changed */
 	if (pg != PGO_DONTCARE) {
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
diff -u -p -r1.334 uvm_map.c
--- uvm/uvm_map.c	5 Nov 2024 08:18:44 -0000	1.334
+++ uvm/uvm_map.c	2 Dec 2024 17:07:09 -0000
@@ -461,7 +461,7 @@ void
 uvm_map_lock_entry(struct vm_map_entry *entry)
 {
 	if (entry->aref.ar_amap != NULL) {
-		amap_lock(entry->aref.ar_amap);
+		amap_lock(entry->aref.ar_amap, RW_WRITE);
 	}
 	if (UVM_ET_ISOBJ(entry)) {
 		rw_enter(entry->object.uvm_obj->vmobjlock, RW_WRITE);
@@ -4492,7 +4492,7 @@ uvm_map_clean(struct vm_map *map, vaddr_
 		cp_start = MAX(entry->start, start);
 		cp_end = MIN(entry->end, end);
 
-		amap_lock(amap);
+		amap_lock(amap, RW_WRITE);
 		for (; cp_start != cp_end; cp_start += PAGE_SIZE) {
 			anon = amap_lookup(&entry->aref,
 			    cp_start - entry->start);