Kill uvm_km_zalloc()
On Fri, Nov 01, 2024 at 01:17:48PM +0100, Martin Pieuchot wrote:
> Now unused, ok to remove it?
ok jsg@
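
For reference, a minimal sketch of how a caller gets the same zeroed,
wired kernel memory today, assuming the km_alloc(9) interface with its
documented kv_any/kp_zero/kd_waitok modes; the wrapper names below are
hypothetical:

	#include <sys/param.h>
	#include <uvm/uvm_extern.h>

	/*
	 * kp_zero hands back wired, zero-filled pages and kd_waitok
	 * allows sleeping, matching uvm_km_zalloc() semantics; the
	 * size must be rounded to a page multiple.
	 */
	void *
	example_zalloc(size_t sz)
	{
		return km_alloc(round_page(sz), &kv_any, &kp_zero,
		    &kd_waitok);
	}

	/* The matching release uses the same va/pa modes. */
	void
	example_zfree(void *va, size_t sz)
	{
		km_free(va, round_page(sz), &kv_any, &kp_zero);
	}
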
>
> Index: uvm/uvm_extern.h
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
> diff -u -p -r1.178 uvm_extern.h
> --- uvm/uvm_extern.h 8 Oct 2024 02:29:10 -0000 1.178
> +++ uvm/uvm_extern.h 1 Nov 2024 12:10:39 -0000
> @@ -286,9 +286,6 @@ int uvm_io(vm_map_t, struct uio *, int
>
> #define UVM_IO_FIXPROT 0x01
>
> -#ifdef __i386__
> -vaddr_t uvm_km_zalloc(vm_map_t, vsize_t);
> -#endif
> void uvm_km_free(vm_map_t, vaddr_t, vsize_t);
> vaddr_t uvm_km_kmemalloc_pla(struct vm_map *,
> struct uvm_object *, vsize_t, vsize_t, int,
> Index: uvm/uvm_km.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_km.c,v
> diff -u -p -r1.154 uvm_km.c
> --- uvm/uvm_km.c 24 Aug 2024 10:46:43 -0000 1.154
> +++ uvm/uvm_km.c 1 Nov 2024 12:10:45 -0000
> @@ -433,86 +433,6 @@ uvm_km_free(struct vm_map *map, vaddr_t
> uvm_unmap(map, trunc_page(addr), round_page(addr+size));
> }
>
> -#ifdef __i386__
> -/*
> - * uvm_km_zalloc: allocate wired down memory in the kernel map.
> - *
> - * => we can sleep if needed
> - */
> -vaddr_t
> -uvm_km_zalloc(struct vm_map *map, vsize_t size)
> -{
> - vaddr_t kva, loopva;
> - voff_t offset;
> - struct vm_page *pg;
> -
> - KASSERT(vm_map_pmap(map) == pmap_kernel());
> -
> - size = round_page(size);
> - kva = vm_map_min(map); /* hint */
> -
> - /* allocate some virtual space */
> - if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
> - UVM_UNKNOWN_OFFSET, 0,
> - UVM_MAPFLAG(PROT_READ | PROT_WRITE,
> - PROT_READ | PROT_WRITE | PROT_EXEC,
> - MAP_INHERIT_NONE, MADV_RANDOM, 0)) != 0)) {
> - return 0;
> - }
> -
> - /* recover object offset from virtual address */
> - offset = kva - vm_map_min(kernel_map);
> -
> - /* now allocate the memory. we must be careful about released pages. */
> - loopva = kva;
> - while (size) {
> - rw_enter(uvm.kernel_object->vmobjlock, RW_WRITE);
> - /* allocate ram */
> - pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
> - if (pg) {
> - atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
> - UVM_PAGE_OWN(pg, NULL);
> - }
> - rw_exit(uvm.kernel_object->vmobjlock);
> - if (__predict_false(pg == NULL)) {
> - if (curproc == uvm.pagedaemon_proc) {
> - /*
> - * It is unfeasible for the page daemon to
> - * sleep for memory, so free what we have
> - * allocated and fail.
> - */
> - uvm_unmap(map, kva, loopva - kva);
> - return (0);
> - } else {
> - uvm_wait("km_zallocw"); /* wait for memory */
> - continue;
> - }
> - }
> -
> - /*
> - * map it in; note we're never called with an intrsafe
> - * object, so we always use regular old pmap_enter().
> - */
> - pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
> - PROT_READ | PROT_WRITE,
> - PROT_READ | PROT_WRITE | PMAP_WIRED);
> -
> - loopva += PAGE_SIZE;
> - offset += PAGE_SIZE;
> - size -= PAGE_SIZE;
> - }
> - pmap_update(map->pmap);
> -
> - /*
> - * zero on request (note that "size" is now zero due to the above loop
> - * so we need to subtract kva from loopva to reconstruct the size).
> - */
> - memset((caddr_t)kva, 0, loopva - kva);
> -
> - return kva;
> -}
> -#endif
> -
> #if defined(__HAVE_PMAP_DIRECT)
> /*
> * uvm_km_page allocator, __HAVE_PMAP_DIRECT arch
>
>