From: Martin Pieuchot <mpi@grenadille.net>
Subject: uvm_vnp_obj_alloc
To: tech@openbsd.org
Date: Wed, 24 Sep 2025 12:28:22 +0200

The diff below is a small refactoring of the UVM vnode descriptors.  These
descriptors are used to keep track of mmap(2)ed vnodes and are currently
created for each and every vnode.

It introduces a new function, uvm_vnp_obj_alloc(), to do that and moves
the pool initialization into uvm_vnode.c where it belongs.
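
For review convenience, here is a condensed view of the net change at the
getnewvnode() call site, taken from the diff below:

	/* before: the descriptor was allocated and wired up inline */
	vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
	vp->v_uvm->u_vnode = vp;
	uvm_obj_init(&vp->v_uvm->u_obj, &uvm_vnodeops, 0);

	/* after: a single call into the new uvm_vnode.c helper */
	uvm_vnp_obj_alloc(vp);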

ok?

Index: kern/vfs_subr.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_subr.c,v
diff -u -p -r1.331 vfs_subr.c
--- kern/vfs_subr.c	20 Sep 2025 13:53:36 -0000	1.331
+++ kern/vfs_subr.c	24 Sep 2025 09:45:08 -0000
@@ -67,7 +67,6 @@
 
 #include <netinet/in.h>
 
-#include <uvm/uvm_extern.h>
 #include <uvm/uvm_vnode.h>
 
 #include "softraid.h"
@@ -129,7 +128,6 @@ void printlockedvnodes(void);
 #endif
 
 struct pool vnode_pool;
-struct pool uvm_vnode_pool;
 
 static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
 RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
@@ -154,8 +152,6 @@ vntblinit(void)
 	maxvnodes = 2 * initialvnodes;
 	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
 	    PR_WAITOK, "vnodes", NULL);
-	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
-	    PR_WAITOK, "uvmvnodes", NULL);
 	TAILQ_INIT(&vnode_hold_list);
 	TAILQ_INIT(&vnode_free_list);
 	TAILQ_INIT(&mountlist);
@@ -424,9 +420,7 @@ getnewvnode(enum vtagtype tag, struct mo
 	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
 		splx(s);
 		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
-		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
-		vp->v_uvm->u_vnode = vp;
-		uvm_obj_init(&vp->v_uvm->u_obj, &uvm_vnodeops, 0);
+		uvm_vnp_obj_alloc(vp);
 		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
 		cache_tree_init(&vp->v_nc_tree);
 		TAILQ_INIT(&vp->v_cache_dst);
Index: uvm/uvm_vnode.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v
diff -u -p -r1.140 uvm_vnode.c
--- uvm/uvm_vnode.c	15 Aug 2025 08:21:41 -0000	1.140
+++ uvm/uvm_vnode.c	24 Sep 2025 09:44:36 -0000
@@ -47,6 +47,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/vnode.h>
@@ -67,7 +68,7 @@
  * we keep a list of writeable active vnode-backed VM objects for sync op.
  * we keep a simpleq of vnodes that are currently being sync'd.
  */
-
+struct pool			uvm_vnode_pool;
 LIST_HEAD(, uvm_vnode)		uvn_wlist;	/* [K] writeable uvns */
 SIMPLEQ_HEAD(, uvm_vnode)	uvn_sync_q;	/* [S] sync'ing uvns */
 struct rwlock uvn_sync_lock;			/* locks sync operation */
@@ -113,7 +114,8 @@ const struct uvm_pagerops uvm_vnodeops =
 void
 uvn_init(void)
 {
-
+	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
+	    PR_WAITOK, "uvmvnodes", NULL);
 	LIST_INIT(&uvn_wlist);
 	/* note: uvn_sync_q init'd in uvm_vnp_sync() */
 	rw_init_flags(&uvn_sync_lock, "uvnsync", RWL_IS_VNODE);
@@ -1539,4 +1541,17 @@ uvm_vnp_sync(struct mount *mp)
 	}
 
 	rw_exit_write(&uvn_sync_lock);
+}
+
+void
+uvm_vnp_obj_alloc(struct vnode *vp)
+{
+	struct uvm_vnode *uvn;
+
+	KASSERT(vp->v_uvm == NULL);
+
+	uvn = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
+	uvm_obj_init(&uvn->u_obj, &uvm_vnodeops, 0);
+	uvn->u_vnode = vp;
+	vp->v_uvm = uvn;
 }
Index: uvm/uvm_vnode.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_vnode.h,v
diff -u -p -r1.21 uvm_vnode.h
--- uvm/uvm_vnode.h	20 Oct 2022 13:31:52 -0000	1.21
+++ uvm/uvm_vnode.h	24 Sep 2025 09:46:04 -0000
@@ -94,4 +94,7 @@ struct uvm_vnode {
  */
 #define UVM_VNODE_BLOCKED (UVM_VNODE_ALOCK|UVM_VNODE_DYING|UVM_VNODE_RELKILL)
 
+void			uvm_vnp_obj_alloc(struct vnode *);
+void			uvm_vnp_terminate(struct vnode *);
+
 #endif /* _UVM_UVM_VNODE_H_ */