amd64 bus dma useless lastaddr variable
> Date: Wed, 28 Aug 2024 00:11:32 +0200
> From: Alexander Bluhm <bluhm@openbsd.org>
>
> On Tue, Aug 27, 2024 at 10:28:41PM +0200, Mark Kettenis wrote:
> > > Date: Mon, 26 Aug 2024 14:23:16 +0200
> > > From: Alexander Bluhm <bluhm@openbsd.org>
> > >
> > > Hi,
> > >
> > > I think the lastaddr variable in amd64 bus dma is redundant. Instead
> > > of storing the final address of a segment in the caller,
> > > _bus_dmamap_load_buffer() can inspect the previous segment. Also
> > > in _bus_dmamap_load_raw() and _bus_dmamem_alloc_range() the previous
> > > segment is available and can be used directly.
> > >
> > > ok?
> >
> > Hmm, this makes it a bit harder to understand the code.
>
> My impression was the other way around. Why bother with lastaddr
> in the caller when the same check can be done locally with the
> segment's oldaddr + oldlen == newaddr?
>
> Maybe it helps if I keep lastaddr as a local variable. Updated diff.
Yes, I think that's better.
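
For illustration, here is a small standalone sketch of why the previous
segment already carries all the state that is needed. It is not the
kernel code: append_range() and MAXSEGSZ are made-up names, and the
boundary and bounce-buffer checks are left out.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t bus_addr_t;
typedef uint64_t bus_size_t;

struct seg {
	bus_addr_t ds_addr;
	bus_size_t ds_len;
};

/* stands in for map->_dm_maxsegsz */
#define MAXSEGSZ	0x10000

/*
 * Append [paddr, paddr + sgsize) to segs[]; coalesce with the previous
 * segment when physically contiguous.  Returns the new segment count,
 * or -1 if segcnt would be exceeded.
 */
static int
append_range(struct seg *segs, int nsegs, int segcnt,
    bus_addr_t paddr, bus_size_t sgsize)
{
	if (nsegs > 0) {
		struct seg *prev = &segs[nsegs - 1];
		/* lastaddr recomputed from the previous segment */
		bus_addr_t lastaddr = prev->ds_addr + prev->ds_len;

		if (paddr == lastaddr &&
		    prev->ds_len + sgsize <= MAXSEGSZ) {
			prev->ds_len += sgsize;
			return nsegs;
		}
	}
	if (nsegs >= segcnt)
		return -1;
	segs[nsegs].ds_addr = paddr;
	segs[nsegs].ds_len = sgsize;
	return nsegs + 1;
}

int
main(void)
{
	struct seg segs[4];
	int n = 0;

	/* two contiguous pages coalesce, the third page does not */
	n = append_range(segs, n, 4, 0x1000, 0x1000);
	n = append_range(segs, n, 4, 0x2000, 0x1000);
	n = append_range(segs, n, 4, 0x8000, 0x1000);

	for (int i = 0; i < n; i++)
		printf("seg %d: addr 0x%llx len 0x%llx\n", i,
		    (unsigned long long)segs[i].ds_addr,
		    (unsigned long long)segs[i].ds_len);
	return 0;
}

The prev->ds_addr + prev->ds_len expression is what the diff below uses
in place of the lastaddr that used to be threaded through the callers.
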
> > Also, other architectures use a _dmamap_load_buffer() with a lastaddr
> > pointer argument. By only changing amd64 you make them needlessly
> > diverge.
>
> Of course I can transform the other architectures if we decide to
> go this way.
I had a look at some of the other architectures, including those that
have IOMMUs. And I can't find a good reason for any of them to need
that lastaddr pointer argument. And it seems NetBSD got rid of it as
well at some point.
so ok kettenis@
> Index: arch/amd64/amd64/bus_dma.c
> ===================================================================
> RCS file: /data/mirror/openbsd/cvs/src/sys/arch/amd64/amd64/bus_dma.c,v
> diff -u -p -r1.57 bus_dma.c
> --- arch/amd64/amd64/bus_dma.c 22 Aug 2024 11:36:24 -0000 1.57
> +++ arch/amd64/amd64/bus_dma.c 27 Aug 2024 21:26:12 -0000
> @@ -102,7 +102,7 @@
> #endif
>
> int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
> - struct proc *, int, paddr_t *, int *, int *, int);
> + struct proc *, int, int *, int *, int);
>
> /*
> * Common function for DMA map creation. May be called by bus-specific
> @@ -254,7 +254,6 @@ int
> _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
> bus_size_t buflen, struct proc *p, int flags)
> {
> - bus_addr_t lastaddr = 0;
> int seg, used, error;
>
> /*
> @@ -269,7 +268,7 @@ _bus_dmamap_load(bus_dma_tag_t t, bus_dm
> seg = 0;
> used = 0;
> error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
> - &lastaddr, &seg, &used, 1);
> + &seg, &used, 1);
> if (error == 0) {
> map->dm_mapsize = buflen;
> map->dm_nsegs = seg + 1;
> @@ -285,7 +284,6 @@ int
> _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
> int flags)
> {
> - paddr_t lastaddr = 0;
> int seg, used, error, first;
> struct mbuf *m;
>
> @@ -311,7 +309,7 @@ _bus_dmamap_load_mbuf(bus_dma_tag_t t, b
> if (m->m_len == 0)
> continue;
> error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
> - NULL, flags, &lastaddr, &seg, &used, first);
> + NULL, flags, &seg, &used, first);
> first = 0;
> }
> if (error == 0) {
> @@ -329,7 +327,6 @@ int
> _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
> int flags)
> {
> - paddr_t lastaddr = 0;
> int seg, used, i, error, first;
> bus_size_t minlen, resid;
> struct proc *p = NULL;
> @@ -366,7 +363,7 @@ _bus_dmamap_load_uio(bus_dma_tag_t t, bu
> addr = (caddr_t)iov[i].iov_base;
>
> error = _bus_dmamap_load_buffer(t, map, addr, minlen,
> - p, flags, &lastaddr, &seg, &used, first);
> + p, flags, &seg, &used, first);
> first = 0;
>
> resid -= minlen;
> @@ -387,7 +384,7 @@ int
> _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
> int nsegs, bus_size_t size, int flags)
> {
> - bus_addr_t paddr, baddr, bmask, lastaddr = 0;
> + bus_addr_t paddr, baddr, bmask;
> bus_size_t plen, sgsize, mapsize;
> int first = 1;
> int i, seg = 0;
> @@ -439,14 +436,17 @@ _bus_dmamap_load_raw(bus_dma_tag_t t, bu
> map->dm_segs[seg].ds_len = sgsize;
> first = 0;
> } else {
> + bus_addr_t lastaddr = map->dm_segs[seg].ds_addr
> + + map->dm_segs[seg].ds_len;
> +
> if (paddr == lastaddr &&
> (map->dm_segs[seg].ds_len + sgsize) <=
> map->_dm_maxsegsz &&
> (map->_dm_boundary == 0 ||
> (map->dm_segs[seg].ds_addr & bmask) ==
> - (paddr & bmask)))
> + (paddr & bmask))) {
> map->dm_segs[seg].ds_len += sgsize;
> - else {
> + } else {
> if (++seg >= map->_dm_segcnt)
> return (EINVAL);
> map->dm_segs[seg].ds_addr = paddr;
> @@ -457,8 +457,6 @@ _bus_dmamap_load_raw(bus_dma_tag_t t, bu
> paddr += sgsize;
> plen -= sgsize;
> size -= sgsize;
> -
> - lastaddr = paddr;
> }
> }
>
> @@ -686,18 +684,17 @@ _bus_dmamem_mmap(bus_dma_tag_t t, bus_dm
> * DMA utility functions
> **********************************************************************/
> /*
> - * Utility function to load a linear buffer. lastaddrp holds state
> - * between invocations (for multiple-buffer loads). segp contains
> + * Utility function to load a linear buffer. segp contains
> * the starting segment on entrance, and the ending segment on exit.
> * first indicates if this is the first invocation of this function.
> */
> int
> _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
> - bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
> + bus_size_t buflen, struct proc *p, int flags,
> int *segp, int *usedp, int first)
> {
> bus_size_t sgsize;
> - bus_addr_t curaddr, lastaddr, baddr, bmask;
> + bus_addr_t curaddr, baddr, bmask;
> vaddr_t pgva = -1, vaddr = (vaddr_t)buf;
> int seg, page, off;
> pmap_t pmap;
> @@ -710,7 +707,6 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t,
> pmap = pmap_kernel();
>
> page = *usedp;
> - lastaddr = *lastaddrp;
> bmask = ~(map->_dm_boundary - 1);
>
> for (seg = *segp; buflen > 0 ; ) {
> @@ -762,14 +758,18 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t,
> map->dm_segs[seg]._ds_bounce_va = pgva;
> first = 0;
> } else {
> + bus_addr_t lastaddr = map->dm_segs[seg].ds_addr +
> + map->dm_segs[seg].ds_len;
> + vaddr_t lastva = map->dm_segs[seg]._ds_va +
> + map->dm_segs[seg].ds_len;
> +
> if (curaddr == lastaddr &&
> (map->dm_segs[seg].ds_len + sgsize) <=
> map->_dm_maxsegsz &&
> (map->_dm_boundary == 0 ||
> (map->dm_segs[seg].ds_addr & bmask) ==
> (curaddr & bmask)) &&
> - (!use_bounce_buffer || (map->dm_segs[seg]._ds_va +
> - map->dm_segs[seg].ds_len) == vaddr)) {
> + (!use_bounce_buffer || vaddr == lastva)) {
> map->dm_segs[seg].ds_len += sgsize;
> } else {
> if (++seg >= map->_dm_segcnt)
> @@ -781,14 +781,12 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t,
> }
> }
>
> - lastaddr = curaddr + sgsize;
> vaddr += sgsize;
> buflen -= sgsize;
> }
>
> *segp = seg;
> *usedp = page;
> - *lastaddrp = lastaddr;
>
> /*
> * Did we fit?
> @@ -807,7 +805,7 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t,
> bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
> int flags, bus_addr_t low, bus_addr_t high)
> {
> - paddr_t curaddr, lastaddr;
> + paddr_t curaddr;
> struct vm_page *m;
> struct pglist mlist;
> int curseg, error, plaflag;
> @@ -837,7 +835,7 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t,
> */
> m = TAILQ_FIRST(&mlist);
> curseg = 0;
> - lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
> + segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
> segs[curseg].ds_len = PAGE_SIZE;
>
> for (m = TAILQ_NEXT(m, pageq); m != NULL; m = TAILQ_NEXT(m, pageq)) {
> @@ -853,14 +851,13 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t,
> panic("_bus_dmamem_alloc_range");
> }
> #endif
> - if (curaddr == (lastaddr + PAGE_SIZE))
> + if (curaddr == (segs[curseg].ds_addr + segs[curseg].ds_len)) {
> segs[curseg].ds_len += PAGE_SIZE;
> - else {
> + } else {
> curseg++;
> segs[curseg].ds_addr = curaddr;
> segs[curseg].ds_len = PAGE_SIZE;
> }
> - lastaddr = curaddr;
> }
>
> *rsegs = curseg + 1;
>
>