From: Hans-Jörg Höxer <hshoexer@genua.de>
Subject: busdma: Implement bounce buffering for raw memory
To: <tech@openbsd.org>
Date: Wed, 12 Mar 2025 09:52:57 +0100

Hi,

this diff implements busdma(9) bounce buffering for raw memory, i.e. for
mappings loaded with _bus_dmamap_load_raw().  Under SEV the guest's memory
is encrypted, so DMA data has to be staged through bounce pages that the
device can actually access.  With this diff, xhci(4) attaches when running
on a KVM hypervisor with SEV enabled.
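
For illustration, here is roughly what the added per-page lookup does.
This is a simplified, standalone sketch: struct dmamap and bounce_va()
are stand-ins invented for the example, while _dm_pgva, _dm_npages,
PAGE_MASK and PGSHIFT mirror the names used in the diff.

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define PGSHIFT		12

/* Only the fields the bounce-page lookup needs. */
struct dmamap {
	uintptr_t	_dm_pgva;	/* base VA of preallocated bounce pages */
	int		_dm_npages;	/* bounce pages available in this map */
};

/*
 * Hand out the next bounce page for one DMA page, keeping the
 * intra-page offset so the layout the device sees matches the
 * caller's buffer.  Fails with EFBIG once the map's preallocated
 * pages are used up, as in the diff.
 */
static int
bounce_va(struct dmamap *map, uintptr_t paddr, int *page, uintptr_t *pgva)
{
	if (*page >= map->_dm_npages)
		return (EFBIG);
	*pgva = map->_dm_pgva + ((uintptr_t)*page << PGSHIFT) +
	    (paddr & PAGE_MASK);
	(*page)++;
	return (0);
}

int
main(void)
{
	struct dmamap map = { ._dm_pgva = 0x10000000, ._dm_npages = 2 };
	uintptr_t paddrs[] = { 0x17f0, 0x2000, 0x5abc };
	uintptr_t pgva;
	int i, page = 0;

	for (i = 0; i < 3; i++) {
		if (bounce_va(&map, paddrs[i], &page, &pgva)) {
			printf("paddr 0x%" PRIxPTR ": out of bounce pages\n",
			    paddrs[i]);
			break;
		}
		printf("paddr 0x%" PRIxPTR " -> bounce va 0x%" PRIxPTR "\n",
		    paddrs[i], pgva);
	}
	return (0);
}

The later hunks then record both addresses in the segment (_ds_va,
_ds_bounce_va) and only coalesce adjacent segments when their direct-map
VAs are contiguous as well.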

Take care,
HJ.
---------------------------------------------------------------------------
commit 402662c4392fc235bbb61bbb2a485974284e568a
Author: Hans-Joerg Hoexer <hshoexer@genua.de>
Date:   Thu Mar 6 14:11:32 2025 +0100

    busdma(9): implement bounce buffering for raw memory

diff --git a/sys/arch/amd64/amd64/bus_dma.c b/sys/arch/amd64/amd64/bus_dma.c
index 465401fbb53..37852acb184 100644
--- a/sys/arch/amd64/amd64/bus_dma.c
+++ b/sys/arch/amd64/amd64/bus_dma.c
@@ -391,6 +391,9 @@ _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
 	bus_size_t plen, sgsize, mapsize;
 	int first = 1;
 	int i, seg = 0;
+	int page, off;
+	vaddr_t pgva, vaddr;
+	int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;
 
 	/*
 	 * Make sure that on error condition we return "no valid mappings".
@@ -401,6 +404,10 @@ _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
 	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
 		return (EINVAL);
 
+	page = 0;
+	pgva = -1;
+	vaddr = -1;
+
 	mapsize = size;
 	bmask  = ~(map->_dm_boundary - 1);
 
@@ -409,6 +416,16 @@ _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
 		plen = MIN(segs[i].ds_len, size);
 
 		while (plen > 0) {
+			if (use_bounce_buffer) {
+				if (page >= map->_dm_npages)
+					return (EFBIG);
+
+				off = paddr & PAGE_MASK;
+				vaddr = PMAP_DIRECT_MAP(paddr);
+				pgva = map->_dm_pgva + (page << PGSHIFT) + off;
+				page++;
+			}
+
 			/*
 			 * Compute the segment size, and adjust counts.
 			 */
@@ -437,6 +454,8 @@ _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
 			if (first) {
 				map->dm_segs[seg].ds_addr = paddr;
 				map->dm_segs[seg].ds_len = sgsize;
+				map->dm_segs[seg]._ds_va = vaddr;
+				map->dm_segs[seg]._ds_bounce_va = pgva;
 				first = 0;
 			} else {
 				if (paddr == lastaddr &&
@@ -444,13 +463,18 @@ _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
 				     map->_dm_maxsegsz &&
 				    (map->_dm_boundary == 0 ||
 				     (map->dm_segs[seg].ds_addr & bmask) ==
-				     (paddr & bmask)))
+				     (paddr & bmask)) &&
+				    (!use_bounce_buffer ||
+				     (map->dm_segs[seg]._ds_va +
+				     map->dm_segs[seg].ds_len) == vaddr)) {
 					map->dm_segs[seg].ds_len += sgsize;
-				else {
+				} else {
 					if (++seg >= map->_dm_segcnt)
 						return (EINVAL);
 					map->dm_segs[seg].ds_addr = paddr;
 					map->dm_segs[seg].ds_len = sgsize;
+					map->dm_segs[seg]._ds_va = vaddr;
+					map->dm_segs[seg]._ds_bounce_va = pgva;
 				}
 			}