From: Martin Pieuchot <mpi@grenadille.net>
Subject: Kill UVM_LK_ENTER/EXIT
To: tech@openbsd.org
Date: Wed, 14 May 2025 15:33:59 +0200

The diff below removes the `lockflags' argument from uvm_map_pageable(),
grabs the exclusive lock explicitly in the callers where necessary and
adds locking assertions to uvm_map_pageable_wire() and uvm_map_pageable().

As a result uvm_map_pageable_wire() becomes much simpler, the locking
requirement is obvious, and I no longer have to trace where locks are
released.
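
Both functions now assert the exclusive lock with vm_map_assert_wrlock()
and return with the map still locked, so callers that don't already hold
the lock take and release it explicitly.  A minimal sketch of the new
convention, mirroring the uvm_vslock() chunk below:

	vm_map_lock(map);
	/* FALSE wires the range, TRUE unwires it. */
	error = uvm_map_pageable(map, start, end, FALSE);
	vm_map_unlock(map);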

I'd like to get this in before fixing uvm_vslock_device(), which suffers
from the same race that was present in uvm_vslock().

ok?

Index: dev/ic/psp.c
===================================================================
RCS file: /cvs/src/sys/dev/ic/psp.c,v
diff -u -p -r1.16 psp.c
--- dev/ic/psp.c	25 Apr 2025 19:10:50 -0000	1.16
+++ dev/ic/psp.c	14 May 2025 13:19:36 -0000
@@ -504,7 +504,9 @@ psp_launch_update_data(struct psp_softc 
 	end = start + size;
 
 	/* Wire mapping. */
-	error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end, FALSE, 0);
+	vm_map_lock(&p->p_vmspace->vm_map);
+	error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end, FALSE);
+	vm_map_unlock(&p->p_vmspace->vm_map);
 	if (error)
 		goto out;
 
@@ -533,7 +535,9 @@ out:
 	 * Unwire again.  Ignore new error.  Error has either been set,
 	 * or PSP command has already succeeded.
 	 */
-	(void) uvm_map_pageable(&p->p_vmspace->vm_map, start, end, TRUE, 0);
+	vm_map_lock(&p->p_vmspace->vm_map);
+	(void) uvm_map_pageable(&p->p_vmspace->vm_map, start, end, TRUE);
+	vm_map_unlock(&p->p_vmspace->vm_map);
 
 	return (error);
 }
Index: uvm/uvm_extern.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
diff -u -p -r1.180 uvm_extern.h
--- uvm/uvm_extern.h	19 Nov 2024 06:18:26 -0000	1.180
+++ uvm/uvm_extern.h	14 May 2025 13:19:36 -0000
@@ -153,12 +153,6 @@ typedef int		vm_prot_t;
 #define UVM_PLA_USERESERVE	0x0040	/* can allocate from kernel reserve */
 
 /*
- * lockflags that control the locking behavior of various functions.
- */
-#define	UVM_LK_ENTER	0x00000001	/* map locked on entry */
-#define	UVM_LK_EXIT	0x00000002	/* leave map locked on exit */
-
-/*
  * flags to uvm_page_physload.
  */
 #define	PHYSLOAD_DEVICE	0x01	/* don't add to the page queue */
@@ -388,7 +382,7 @@ int			uvm_map(vm_map_t, vaddr_t *, vsize
 			    struct uvm_object *, voff_t, vsize_t, unsigned int);
 int			uvm_mapanon(vm_map_t, vaddr_t *, vsize_t, vsize_t, unsigned int);
 int			uvm_map_pageable(vm_map_t, vaddr_t, 
-			    vaddr_t, boolean_t, int);
+			    vaddr_t, boolean_t);
 int			uvm_map_pageable_all(vm_map_t, int, vsize_t);
 boolean_t		uvm_map_checkprot(vm_map_t, vaddr_t,
 			    vaddr_t, vm_prot_t);
Index: uvm/uvm_glue.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_glue.c,v
diff -u -p -r1.88 uvm_glue.c
--- uvm/uvm_glue.c	21 Mar 2025 13:19:33 -0000	1.88
+++ uvm/uvm_glue.c	14 May 2025 13:19:36 -0000
@@ -108,13 +108,17 @@ uvm_vslock(struct proc *p, caddr_t addr,
 {
 	struct vm_map *map = &p->p_vmspace->vm_map;
 	vaddr_t start, end;
+	int error;
 
 	start = trunc_page((vaddr_t)addr);
 	end = round_page((vaddr_t)addr + len);
 	if (end <= start)
 		return (EINVAL);
 
-	return uvm_map_pageable(map, start, end, FALSE, 0);
+	vm_map_lock(map);
+	error = uvm_map_pageable(map, start, end, FALSE);
+	vm_map_unlock(map);
+	return error;
 }
 
 /*
@@ -132,7 +136,9 @@ uvm_vsunlock(struct proc *p, caddr_t add
 	end = round_page((vaddr_t)addr + len);
 	KASSERT(end > start);
 
-	uvm_map_pageable(map, start, end, TRUE, 0);
+	vm_map_lock(map);
+	uvm_map_pageable(map, start, end, TRUE);
+	vm_map_unlock(map);
 }
 
 /*
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
diff -u -p -r1.341 uvm_map.c
--- uvm/uvm_map.c	21 Apr 2025 14:46:18 -0000	1.341
+++ uvm/uvm_map.c	14 May 2025 13:19:36 -0000
@@ -135,7 +135,7 @@ void			 uvm_map_pageable_pgon(struct vm_
 			    vaddr_t, vaddr_t);
 int			 uvm_map_pageable_wire(struct vm_map*,
 			    struct vm_map_entry*, struct vm_map_entry*,
-			    vaddr_t, vaddr_t, int);
+			    vaddr_t, vaddr_t);
 void			 uvm_map_setup_entries(struct vm_map*);
 void			 uvm_map_setup_md(struct vm_map*);
 void			 uvm_map_teardown(struct vm_map*);
@@ -2057,14 +2057,10 @@ uvm_map_pageable_pgon(struct vm_map *map
 
 /*
  * Mark all entries from first until end (exclusive) as wired.
- *
- * Lockflags determines the lock state on return from this function.
- * Lock must be exclusive on entry.
  */
 int
 uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
-    struct vm_map_entry *end, vaddr_t start_addr, vaddr_t end_addr,
-    int lockflags)
+    struct vm_map_entry *end, vaddr_t start_addr, vaddr_t end_addr)
 {
 	struct vm_map_entry *iter;
 #ifdef DIAGNOSTIC
@@ -2072,6 +2068,8 @@ uvm_map_pageable_wire(struct vm_map *map
 #endif
 	int error;
 
+	vm_map_assert_wrlock(map);
+
 	/*
 	 * Wire pages in two passes:
 	 *
@@ -2129,12 +2127,12 @@ uvm_map_pageable_wire(struct vm_map *map
 	vm_map_lock(map);
 	vm_map_unbusy(map);
 
-	if (error) {
 #ifdef DIAGNOSTIC
-		if (timestamp_save != map->timestamp)
-			panic("uvm_map_pageable_wire: stale map");
+	if (timestamp_save != map->timestamp)
+		panic("stale map");
 #endif
 
+	if (error) {
 		/*
 		 * first is no longer needed to restart loops.
 		 * Use it as iterator to unmap successful mappings.
@@ -2162,37 +2160,20 @@ uvm_map_pageable_wire(struct vm_map *map
 
 			iter->wired_count--;
 		}
-
-		if ((lockflags & UVM_LK_EXIT) == 0)
-			vm_map_unlock(map);
-		return error;
 	}
 
-
-	if ((lockflags & UVM_LK_EXIT) == 0) {
-		vm_map_unlock(map);
-	} else {
-#ifdef DIAGNOSTIC
-		if (timestamp_save != map->timestamp)
-			panic("uvm_map_pageable_wire: stale map");
-#endif
-	}
-	return 0;
+	return error;
 }
 
 /*
  * uvm_map_pageable: set pageability of a range in a map.
  *
- * Flags:
- * UVM_LK_ENTER: map is already locked by caller
- * UVM_LK_EXIT:  don't unlock map on exit
- *
  * The full range must be in use (entries may not have fspace != 0).
  * UVM_ET_HOLE counts as unmapped.
  */
 int
 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
-    boolean_t new_pageable, int lockflags)
+    boolean_t new_pageable)
 {
 	struct vm_map_entry *first, *last, *tmp;
 	int error;
@@ -2210,8 +2191,7 @@ uvm_map_pageable(struct vm_map *map, vad
 		return EINVAL; /* why? see second XXX below */
 
 	KASSERT(map->flags & VM_MAP_PAGEABLE);
-	if ((lockflags & UVM_LK_ENTER) == 0)
-		vm_map_lock(map);
+	vm_map_assert_wrlock(map);
 
 	/*
 	 * Find first entry.
@@ -2282,8 +2262,6 @@ uvm_map_pageable(struct vm_map *map, vad
 		error = 0;
 
 out:
-		if ((lockflags & UVM_LK_EXIT) == 0)
-			vm_map_unlock(map);
 		return error;
 	} else {
 		/*
@@ -2303,8 +2281,7 @@ out:
 		} else
 			tmp = last;
 
-		return uvm_map_pageable_wire(map, first, tmp, start, end,
-		    lockflags);
+		return uvm_map_pageable_wire(map, first, tmp, start, end);
 	}
 }
 
@@ -2370,7 +2347,7 @@ uvm_map_pageable_all(struct vm_map *map,
 	 * uvm_map_pageable_wire will release lock
 	 */
 	return uvm_map_pageable_wire(map, RBT_MIN(uvm_map_addr, &map->addr),
-	    NULL, map->min_offset, map->max_offset, 0);
+	    NULL, map->min_offset, map->max_offset);
 }
 
 /*
@@ -3196,7 +3173,7 @@ uvm_map_protect(struct vm_map *map, vadd
 		    old_prot == PROT_NONE &&
 		    new_prot != PROT_NONE) {
 			if (uvm_map_pageable(map, iter->start, iter->end,
-			    FALSE, UVM_LK_ENTER | UVM_LK_EXIT) != 0) {
+			    FALSE) != 0) {
 				/*
 				 * If locking the entry fails, remember the
 				 * error if it's the first one.  Note we
Index: uvm/uvm_mmap.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_mmap.c,v
diff -u -p -r1.198 uvm_mmap.c
--- uvm/uvm_mmap.c	6 Apr 2025 20:20:11 -0000	1.198
+++ uvm/uvm_mmap.c	14 May 2025 13:19:36 -0000
@@ -821,6 +821,7 @@ sys_mlock(struct proc *p, void *v, regis
 		syscallarg(const void *) addr;
 		syscallarg(size_t) len;
 	} */ *uap = v;
+	vm_map_t map = &p->p_vmspace->vm_map;
 	vaddr_t addr;
 	vsize_t size, pageoff;
 	int error;
@@ -838,7 +839,7 @@ sys_mlock(struct proc *p, void *v, regis
 		return EAGAIN;
 
 #ifdef pmap_wired_count
-	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
+	if (size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
 			lim_cur(RLIMIT_MEMLOCK))
 		return EAGAIN;
 #else
@@ -846,8 +847,9 @@ sys_mlock(struct proc *p, void *v, regis
 		return error;
 #endif
 
-	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
-	    0);
+	vm_map_lock(map);
+	error = uvm_map_pageable(map, addr, addr+size, FALSE);
+	vm_map_unlock(map);
 	return error == 0 ? 0 : ENOMEM;
 }
 
@@ -862,6 +864,7 @@ sys_munlock(struct proc *p, void *v, reg
 		syscallarg(const void *) addr;
 		syscallarg(size_t) len;
 	} */ *uap = v;
+	vm_map_t map = &p->p_vmspace->vm_map;
 	vaddr_t addr;
 	vsize_t size, pageoff;
 	int error;
@@ -880,8 +883,9 @@ sys_munlock(struct proc *p, void *v, reg
 		return error;
 #endif
 
-	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
-	    0);
+	vm_map_lock(map);
+	error = uvm_map_pageable(map, addr, addr+size, TRUE);
+	vm_map_unlock(map);
 	return error == 0 ? 0 : ENOMEM;
 }
 
@@ -962,12 +966,9 @@ uvm_mmaplock(vm_map_t map, vaddr_t *addr
 			KERNEL_UNLOCK();
 			return error;
 		}
-		/*
-		 * uvm_map_pageable() always returns the map
-		 * unlocked.
-		 */
-		error = uvm_map_pageable(map, *addr, *addr + size,
-		    FALSE, UVM_LK_ENTER);
+
+		error = uvm_map_pageable(map, *addr, *addr + size, FALSE);
+		vm_map_unlock(map);
 		if (error != 0) {
 			/* unmap the region! */
 			uvm_unmap(map, *addr, *addr + size);