Kill UVM_VNODE_VALID
Now that "mmap descriptors" are allocated as part of uvn_attach() a NULL
check is enough for testing validity. The diff below gets rid of this
flag and assert that the KERNEL_LOCK() is held when checking if the
descriptor associated to a vnode is NULL or not.
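For illustration, this is the check pattern callers are left with once
the flag is gone (a sketch, not part of the diff; it mirrors the
uvm_vnp_terminate()/uvm_vnp_uncache() hunks below): the only validity
test is vp->v_uvm against NULL, done while the kernel lock is held.

	struct uvm_vnode *uvn;

	KERNEL_ASSERT_LOCKED();		/* vp->v_uvm checked under the kernel lock */
	uvn = vp->v_uvm;
	if (uvn == NULL)
		return;			/* no mmap descriptor, nothing to do */
	/* from here on, uvn is an attached descriptor */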
ok?
Index: uvm/uvm_vnode.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v
diff -u -p -r1.148 uvm_vnode.c
--- uvm/uvm_vnode.c 10 Nov 2025 15:53:06 -0000 1.148
+++ uvm/uvm_vnode.c 10 Nov 2025 17:30:02 -0000
@@ -137,7 +137,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
{
struct uvm_vnode *uvn;
struct vattr vattr;
- int result;
+ int result, new = 0;
struct partinfo pi;
u_quad_t used_vnode_size = 0;
@@ -187,6 +187,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
uvm_obj_init(&uvn->u_obj, &uvm_vnodeops, 0);
uvn->u_vnode = vp;
vp->v_uvm = uvn;
+ new = 1;
} else
pool_put(&uvm_vnode_pool, uvn);
}
@@ -194,6 +195,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
uvn = vp->v_uvm;
rw_enter(uvn->u_obj.vmobjlock, RW_WRITE);
while (uvn->u_flags & UVM_VNODE_BLOCKED) {
+ KASSERT(!new);
uvn->u_flags |= UVM_VNODE_WANTED;
rwsleep_nsec(uvn, uvn->u_obj.vmobjlock, PVM, "uvn_attach",
INFSLP);
@@ -204,8 +206,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
* first check to see if it is already active, in which case
* we can bump the reference count.
*/
- if (uvn->u_flags & UVM_VNODE_VALID) { /* already active? */
-
+ if (!new) {
/* regain vref if we were persisting */
if (uvn->u_obj.uo_refs == 0) {
vref(vp);
@@ -215,7 +216,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
/* now set up the uvn. */
KASSERT(uvn->u_obj.uo_refs == 0);
uvn->u_obj.uo_refs++;
- uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST;
+ uvn->u_flags = UVM_VNODE_CANPERSIST;
uvn->u_nio = 0;
uvn->u_size = used_vnode_size;
@@ -252,23 +253,10 @@ uvn_attach(struct vnode *vp, vm_prot_t a
*
* => caller must be using the same accessprot as was used at attach time
*/
-
-
void
uvn_reference(struct uvm_object *uobj)
{
-#ifdef DEBUG
- struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
-#endif
-
rw_enter(uobj->vmobjlock, RW_WRITE);
-#ifdef DEBUG
- if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
- printf("uvn_reference: ref=%d, flags=0x%x\n",
- uobj->uo_refs, uvn->u_flags);
- panic("uvn_reference: invalid state");
- }
-#endif
uobj->uo_refs++;
rw_exit(uobj->vmobjlock);
}
@@ -407,15 +395,11 @@ uvm_vnp_terminate(struct vnode *vp)
struct uvm_object *uobj;
int oldflags;
+ KERNEL_ASSERT_LOCKED();
if (uvn == NULL)
return;
uobj = &uvn->u_obj;
rw_enter(uobj->vmobjlock, RW_WRITE);
- if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
- rw_exit(uobj->vmobjlock);
- return;
- }
-
/*
* must be a valid uvn that is not already dying (because XLOCK
* protects us from that).
@@ -1344,12 +1328,12 @@ uvm_vnp_uncache(struct vnode *vp)
struct uvm_vnode *uvn = vp->v_uvm;
struct uvm_object *uobj;
+ KERNEL_ASSERT_LOCKED();
if (uvn == NULL)
return TRUE;
uobj = &uvn->u_obj;
rw_enter(uobj->vmobjlock, RW_WRITE);
- if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
- (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
+ if (uvn->u_flags & UVM_VNODE_BLOCKED) {
rw_exit(uobj->vmobjlock);
return TRUE;
}
@@ -1429,19 +1413,11 @@ uvm_vnp_setsize(struct vnode *vp, off_t
return;
uobj = &uvn->u_obj;
rw_enter(uobj->vmobjlock, RW_WRITE);
- if (uvn->u_flags & UVM_VNODE_VALID) {
-
- /*
- * now check if the size has changed: if we shrink we had better
- * toss some pages...
- */
-
- if (uvn->u_size > newsize) {
- (void)uvn_flush(&uvn->u_obj, newsize,
- uvn->u_size, PGO_FREE);
- }
- uvn->u_size = newsize;
+ if (newsize < uvn->u_size) {
+ /* File has shrunk. Toss any cached pages beyond the new EOF. */
+ uvn_flush(&uvn->u_obj, newsize, uvn->u_size, PGO_FREE);
}
+ uvn->u_size = newsize;
rw_exit(uobj->vmobjlock);
}
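For reference, the tail of uvm_vnp_setsize() would then read roughly as
follows (a sketch reconstructed from the last hunk above, indentation
approximate):

	uobj = &uvn->u_obj;
	rw_enter(uobj->vmobjlock, RW_WRITE);
	if (newsize < uvn->u_size) {
		/* File has shrunk. Toss any cached pages beyond the new EOF. */
		uvn_flush(&uvn->u_obj, newsize, uvn->u_size, PGO_FREE);
	}
	uvn->u_size = newsize;
	rw_exit(uobj->vmobjlock);

u_size is now updated unconditionally, which matches the old behaviour:
any uvn reached here was attached, so UVM_VNODE_VALID was always set.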