From: Mark Kettenis
Subject: Re: bce(4): use km_alloc
To: David Hill
Cc: tech@openbsd.org
Date: Thu, 14 Nov 2024 15:58:48 +0100

> Date: Thu, 14 Nov 2024 14:46:21 +0000
> From: David Hill
> 
> Hello -
> 
> uvm_km_kmemalloc_pla has 3 users: bce(4), envy(4), and malloc.
> 
> I have attempted a conversion to km_alloc for bce(4), but I lack the
> hardware to test.  If anyone has bce(4), could you please test?

This should really use bus_dmamem_alloc_range(9).  I don't think we had
that interface back when the driver was changed to use
uvm_km_kmemalloc_pla().

> Index: dev/pci/if_bce.c
> ===================================================================
> RCS file: /cvs/src/sys/dev/pci/if_bce.c,v
> diff -u -p -r1.57 if_bce.c
> --- dev/pci/if_bce.c	31 Aug 2024 16:23:09 -0000	1.57
> +++ dev/pci/if_bce.c	8 Nov 2024 18:17:48 -0000
> @@ -172,6 +172,21 @@ const struct pci_matchid bce_devices[] =
>  	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
>  };
>  
> +struct uvm_constraint_range bce_constraint = {
> +	0,
> +	(paddr_t)(0x40000000 - 1),
> +};
> +
> +const struct kmem_pa_mode bce_mode = {
> +	.kp_constraint = &bce_constraint,
> +	.kp_maxseg = 1,
> +};
> +
> +const struct kmem_va_mode bce_area = {
> +	.kv_map = &kernel_map,
> +	.kv_align = 0,
> +};
> +
>  int
>  bce_probe(struct device *parent, void *match, void *aux)
>  {
> @@ -249,9 +264,8 @@ bce_attach(struct device *parent, struct
>  	bce_reset(sc);
>  
>  	/* Create the data DMA region and maps. */
> -	if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
> -	    uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0,
> -	    UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) {
> +	if ((sc->bce_data = km_alloc((BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +	    &bce_area, &bce_mode, &kd_nowait)) == NULL) {
>  		printf(": unable to alloc space for ring");
>  		return;
>  	}
> 
> @@ -261,8 +275,8 @@ bce_attach(struct device *parent, struct
>  	    1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
>  	    &sc->bce_rxdata_map))) {
>  		printf(": unable to create ring DMA map, error = %d\n", error);
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		return;
>  	}
> 
> @@ -270,8 +284,8 @@ bce_attach(struct device *parent, struct
>  	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
>  	    BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
>  		printf(": unable to load rx ring DMA map\n");
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
>  		return;
>  	}
> 
> @@ -281,8 +295,8 @@ bce_attach(struct device *parent, struct
>  	    1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
>  	    &sc->bce_txdata_map))) {
>  		printf(": unable to create ring DMA map, error = %d\n", error);
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
>  		return;
>  	}
> 
> @@ -292,8 +306,8 @@ bce_attach(struct device *parent, struct
>  	    sc->bce_data + BCE_NRXDESC * MCLBYTES,
>  	    BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
>  		printf(": unable to load tx ring DMA map\n");
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
>  		return;
> @@ -314,8 +328,8 @@ bce_attach(struct device *parent, struct
>  	    (bus_addr_t)0, (bus_addr_t)0x3fffffff))) {
>  		printf(": unable to alloc space for ring descriptors, "
>  		    "error = %d\n", error);
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
>  		return;
> @@ -325,8 +339,8 @@ bce_attach(struct device *parent, struct
>  	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
>  	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
>  		printf(": unable to map DMA buffers, error = %d\n", error);
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
>  		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
> @@ -337,8 +351,8 @@ bce_attach(struct device *parent, struct
>  	if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
>  	    2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
>  		printf(": unable to create ring DMA map, error = %d\n", error);
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
>  		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
> @@ -349,8 +363,8 @@ bce_attach(struct device *parent, struct
>  	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
>  	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
>  		printf(": unable to load ring DMA map\n");
> -		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
> -		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
> +		km_free(sc->bce_data, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES,
> +		    &bce_area, &bce_mode);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
>  		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
> 
> 
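For illustration only, a rough, untested sketch of what the sc->bce_data
allocation in bce_attach() could look like with bus_dmamem_alloc_range(9)
instead of km_alloc(9).  This is not part of the patch above: the local
names (data_seg, data_rseg, data_kva) are made up, and the unwinding of the
later failure paths (which would then use bus_dmamem_unmap()/
bus_dmamem_free() rather than km_free()) is left out.

	bus_dma_segment_t data_seg;
	int data_rseg;
	caddr_t data_kva;

	/*
	 * Allocate wired memory for the rx/tx data buffers as a single
	 * physically contiguous segment below 1GB, matching the old
	 * uvm_km_kmemalloc_pla() constraint.
	 */
	if (bus_dmamem_alloc_range(sc->bce_dmatag,
	    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, PAGE_SIZE, 0,
	    &data_seg, 1, &data_rseg, BUS_DMA_NOWAIT,
	    (bus_addr_t)0, (bus_addr_t)0x3fffffff)) {
		printf(": unable to alloc space for ring data\n");
		return;
	}

	/* Map the segment into kernel virtual address space. */
	if (bus_dmamem_map(sc->bce_dmatag, &data_seg, data_rseg,
	    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, &data_kva,
	    BUS_DMA_NOWAIT)) {
		printf(": unable to map ring data\n");
		bus_dmamem_free(sc->bce_dmatag, &data_seg, data_rseg);
		return;
	}
	sc->bce_data = data_kva;

The 0x3fffffff upper bound mirrors the existing descriptor-ring allocation
and the old uvm_km_kmemalloc_pla() constraint, and asking for a single
segment keeps the buffer physically contiguous, like kp_maxseg = 1 does in
the patch.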