Index | Thread | Search

From:
Martin Pieuchot <mpi@grenadille.net>
Subject:
pgo_fault & VM_PAGER_* values
To:
tech@openbsd.org
Cc:
jsg@openbsd.org, tb@openbsd.org
Date:
Wed, 4 Dec 2024 10:52:59 +0100

Download raw body.

Thread
Similar to the previous change in uvmfault_anonget(), the diff below
gets rid of the VM_PAGER_* values and uses errno values instead in all
the pgo_fault() handlers.

The goal of this change is to reduce the differences with NetBSD's UVM
and, later on, to allow returning distinct error codes.

I've annotated return values that are currently EACCES but could be
something else.

While here grab the KERNEL_LOCK() after the UVM object lock to reduce
contention.

ok?

diff --git sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
index 4818cbb7e07..ce2cc763c80 100644
--- sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
+++ sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
@@ -101,13 +101,13 @@ amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 	if (ret) {
 		switch (ret) {
 		case VM_FAULT_NOPAGE:
-			ret = VM_PAGER_OK;
+			ret = 0;
 			break;
 		case VM_FAULT_RETRY:
-			ret = VM_PAGER_REFAULT;
+			ret = ERESTART;
 			break;
 		default:
-			ret = VM_PAGER_BAD;
+			ret = EACCES;
 			break;
 		}
 		uvmfault_unlockall(ufi, NULL, uobj);
@@ -139,13 +139,13 @@ amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 unlock:
 	switch (ret) {
 	case VM_FAULT_NOPAGE:
-		ret = VM_PAGER_OK;
+		ret = 0;
 		break;
 	case VM_FAULT_RETRY:
-		ret = VM_PAGER_REFAULT;
+		ret = ERESTART;
 		break;
 	default:
-		ret = VM_PAGER_BAD;
+		ret = EACCES;
 		break;
 	}
 	dma_resv_unlock(bo->base.resv);
diff --git sys/dev/pci/drm/drm_gem.c sys/dev/pci/drm/drm_gem.c
index 0154d8c22c6..7d2f6b70e8a 100644
--- sys/dev/pci/drm/drm_gem.c
+++ sys/dev/pci/drm/drm_gem.c
@@ -102,10 +102,9 @@ drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 	 * we do not allow device mappings to be mapped copy-on-write
 	 * so we kill any attempt to do so here.
 	 */
-	
 	if (UVM_ET_ISCOPYONWRITE(entry)) {
 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
-		return(VM_PAGER_ERROR);
+		return EACCES;
 	}
 
 	/*
@@ -125,7 +124,7 @@ drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 			    PZERO, "drmflt", INFSLP);
 		}
 		mtx_leave(&dev->quiesce_mtx);
-		return(VM_PAGER_REFAULT);
+		return ERESTART;
 	}
 	dev->quiesce_count++;
 	mtx_leave(&dev->quiesce_mtx);
@@ -141,7 +140,7 @@ drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 		wakeup(&dev->quiesce_count);
 	mtx_leave(&dev->quiesce_mtx);
 
-	return (ret);
+	return ret;
 }
 
 boolean_t	
diff --git sys/dev/pci/drm/drm_gem_dma_helper.c sys/dev/pci/drm/drm_gem_dma_helper.c
index 05bdfefecbf..6e3d96f22ff 100644
--- sys/dev/pci/drm/drm_gem_dma_helper.c
+++ sys/dev/pci/drm/drm_gem_dma_helper.c
@@ -208,7 +208,7 @@ drm_gem_dma_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
 		paddr = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
 		    offset, access_type, BUS_DMA_NOCACHE);
 		if (paddr == -1) {
-			retval = VM_PAGER_BAD;
+			retval = EACCES;
 			break;
 		}
 
@@ -218,7 +218,7 @@ drm_gem_dma_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
 			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
 			    uobj);
 			uvm_wait("drm_gem_dma_fault");
-			return VM_PAGER_REFAULT;
+			return ERESTART;
 		}
 	}
 
diff --git sys/dev/pci/drm/i915/gem/i915_gem_mman.c sys/dev/pci/drm/i915/gem/i915_gem_mman.c
index 8ee2a8c1f0f..653076cc323 100644
--- sys/dev/pci/drm/i915/gem/i915_gem_mman.c
+++ sys/dev/pci/drm/i915/gem/i915_gem_mman.c
@@ -591,10 +591,10 @@ static int i915_error_to_vmf_fault(int err)
 	case -EFAULT: /* purged object */
 	case -ENODEV: /* bad object, how did you get here! */
 	case -ENXIO: /* unable to access backing store (on device) */
-		return VM_PAGER_ERROR;
+		return EACCES;
 
 	case -ENOMEM: /* our allocation failure */
-		return VM_PAGER_ERROR;
+		return EACCES; /* XXX */
 
 	case 0:
 	case -EAGAIN:
@@ -607,7 +607,7 @@ static int i915_error_to_vmf_fault(int err)
 		 * EBUSY is ok: this just means that another thread
 		 * already did the job.
 		 */
-		return VM_PAGER_OK;
+		return 0;
 	}
 }
 
@@ -629,11 +629,11 @@ vm_fault_cpu(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
 	/* Sanity check that we allow writing into this object */
 	if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
 		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
-		return VM_PAGER_BAD;
+		return EACCES;
 	}
 
 	if (i915_gem_object_lock_interruptible(obj, NULL))
-		return VM_PAGER_ERROR;
+		return EACCES;
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
@@ -921,7 +921,7 @@ i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
 	if (!mmo) {
 		uvmfault_unlockall(ufi, NULL, &gem_obj->uobj);
-		return VM_PAGER_BAD;
+		return EACCES;
 	}
 
 	KASSERT(gem_obj == &mmo->obj->base);
diff --git sys/dev/pci/drm/i915/gem/i915_gem_ttm.c sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
index cbca8f52353..d77e467d43d 100644
--- sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
+++ sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
@@ -1240,20 +1240,20 @@ vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 	/* Sanity check that we allow writing into this object */
 	if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
 		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
-		return VM_PAGER_BAD;
+		return EACCES;
 	}
 
 	ret = ttm_bo_vm_reserve(bo);
 	if (ret) {
 		switch (ret) {
 		case VM_FAULT_NOPAGE:
-			ret = VM_PAGER_OK;
+			ret = 0;
 			break;
 		case VM_FAULT_RETRY:
-			ret = VM_PAGER_REFAULT;
+			ret = ERESTART;
 			break;
 		default:
-			ret = VM_PAGER_BAD;
+			ret = EACCES;
 			break;
 		}
 		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
@@ -1263,7 +1263,7 @@ vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 	if (obj->mm.madv != I915_MADV_WILLNEED) {
 		dma_resv_unlock(bo->base.resv);
 		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
-		return VM_PAGER_BAD;
+		return EACCES;
 	}
 
 	/*
@@ -1285,7 +1285,7 @@ vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 		if (err) {
 			dma_resv_unlock(bo->base.resv);
 			uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
-			return VM_PAGER_BAD;
+			return EACCES;
 		}
 	} else if (!i915_ttm_resource_mappable(bo->resource)) {
 		int err = -ENODEV;
@@ -1359,13 +1359,13 @@ vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 out_rpm:
 	switch (ret) {
 	case VM_FAULT_NOPAGE:
-		ret = VM_PAGER_OK;
+		ret = 0;
 		break;
 	case VM_FAULT_RETRY:
-		ret = VM_PAGER_REFAULT;
+		ret = ERESTART;
 		break;
 	default:
-		ret = VM_PAGER_BAD;
+		ret = EACCES;
 		break;
 	}
 
diff --git sys/dev/pci/drm/radeon/radeon_gem.c sys/dev/pci/drm/radeon/radeon_gem.c
index b9db67ce876..d18b3ab57aa 100644
--- sys/dev/pci/drm/radeon/radeon_gem.c
+++ sys/dev/pci/drm/radeon/radeon_gem.c
@@ -115,13 +115,13 @@ unlock_resv:
 unlock_mclk:
 	switch (ret) {
 	case VM_FAULT_NOPAGE:
-		ret = VM_PAGER_OK;
+		ret = 0;
 		break;
 	case VM_FAULT_RETRY:
-		ret = VM_PAGER_REFAULT;
+		ret = ERESTART;
 		break;
 	default:
-		ret = VM_PAGER_BAD;
+		ret = EACCES;
 		break;
 	}
 	up_read(&rdev->pm.mclk_lock);
diff --git sys/dev/pci/drm/ttm/ttm_bo_vm.c sys/dev/pci/drm/ttm/ttm_bo_vm.c
index 23bd4ceaa71..45ad7203210 100644
--- sys/dev/pci/drm/ttm/ttm_bo_vm.c
+++ sys/dev/pci/drm/ttm/ttm_bo_vm.c
@@ -598,37 +598,23 @@ ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
 
 	ret = ttm_bo_vm_reserve(bo);
 	if (ret) {
-		switch (ret) {
-		case VM_FAULT_NOPAGE:
-			ret = VM_PAGER_OK;
-			break;
-		case VM_FAULT_RETRY:
-			ret = VM_PAGER_REFAULT;
-			break;
-		default:
-			ret = VM_PAGER_BAD;
-			break;
-		}
-
-		uvmfault_unlockall(ufi, NULL, uobj);
-		return ret;
+		goto out;
 	}
 
 	ret = ttm_bo_vm_fault_reserved(ufi, vaddr, TTM_BO_VM_NUM_PREFAULT, 1);
+	dma_resv_unlock(bo->base.resv);
+out:
 	switch (ret) {
 	case VM_FAULT_NOPAGE:
-		ret = VM_PAGER_OK;
+		ret = 0;
 		break;
 	case VM_FAULT_RETRY:
-		ret = VM_PAGER_REFAULT;
+		ret = ERESTART;
 		break;
 	default:
-		ret = VM_PAGER_BAD;
+		ret = EACCES;
 		break;
 	}
-
-	dma_resv_unlock(bo->base.resv);
-
 	uvmfault_unlockall(ufi, NULL, uobj);
 	return ret;
 }
diff --git sys/uvm/uvm_device.c sys/uvm/uvm_device.c
index 10288d7153c..ee94c893b6f 100644
--- sys/uvm/uvm_device.c
+++ sys/uvm/uvm_device.c
@@ -331,7 +331,7 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
 	 */
 	if (UVM_ET_ISCOPYONWRITE(entry)) {
 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
-		return(VM_PAGER_ERROR);
+		return EACCES;
 	}
 
 	/*
@@ -365,7 +365,7 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
 
 		paddr = (*mapfn)(device, curr_offset, access_type);
 		if (paddr == -1) {
-			retval = VM_PAGER_ERROR;
+			retval = EACCES; /* XXX */
 			break;
 		}
 		mapprot = ufi->entry->protection;
@@ -387,11 +387,11 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
 			/* sync what we have so far */
 			pmap_update(ufi->orig_map->pmap);      
 			uvm_wait("udv_fault");
-			return (VM_PAGER_REFAULT);
+			return ERESTART;
 		}
 	}
 
 	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
 	pmap_update(ufi->orig_map->pmap);
-	return (retval);
+	return retval;
 }
diff --git sys/uvm/uvm_fault.c sys/uvm/uvm_fault.c
index 45ba7f1923b..e694554adf0 100644
--- sys/uvm/uvm_fault.c
+++ sys/uvm/uvm_fault.c
@@ -623,20 +623,13 @@ uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
 			 * providing a pgo_fault routine.
 			 */
 			if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
-				KERNEL_LOCK();
 				rw_enter(uobj->vmobjlock, RW_WRITE);
+				KERNEL_LOCK();
 				error = uobj->pgops->pgo_fault(&ufi,
 				    flt.startva, pages, flt.npages,
 				    flt.centeridx, fault_type, flt.access_type,
 				    PGO_LOCKED);
 				KERNEL_UNLOCK();
-
-				if (error == VM_PAGER_OK)
-					error = 0;
-				else if (error == VM_PAGER_REFAULT)
-					error = ERESTART;
-				else
-					error = EACCES;
 			} else {
 				/* case 2: fault on backing obj or zero fill */
 				error = uvm_fault_lower(&ufi, &flt, pages);