--- linux/fs/buffer.c.62	Thu Feb  3 19:28:39 2000
+++ linux/fs/buffer.c	Thu Feb  3 19:29:06 2000
@@ -1295,6 +1295,30 @@
 }
 
 /*
+ * Clean up the bounce buffers potentially used by brw_kiovec.  All of
+ * the kiovec's bounce buffers must be cleared of temporarily allocated
+ * bounce pages, but only READ pages for which IO completed successfully
+ * can actually be transferred back to user space.
+ */
+
+void cleanup_bounce_buffers(int rw, int nr, struct kiobuf *iovec[],
+			    int transferred)
+{
+	int i;
+	for (i = 0; i < nr; i++) {
+		struct kiobuf *iobuf = iovec[i];
+		if (iobuf->bounced) {
+			/* Only copy back data the IO actually reached */
+			if (transferred > 0 && !(rw & WRITE))
+				kiobuf_copy_bounce(iobuf, COPY_FROM_BOUNCE,
+						   transferred);
+			clear_kiobuf_bounce_pages(iobuf);
+		}
+		transferred -= iobuf->length;
+	}
+}
+
+/*
  * Start I/O on a physical range of kernel memory, defined by a vector
  * of kiobuf structs (much like a user-space iovec list).
  *
@@ -1320,6 +1344,7 @@
 	unsigned long	blocknr;
 	struct kiobuf *	iobuf = NULL;
 	unsigned long	page;
+	unsigned long	bounce;
 	struct page *	map;
 	struct buffer_head *tmp, *bh[KIO_MAX_SECTORS];
 
@@ -1349,13 +1374,24 @@
 	bufind = bhind = transferred = err = 0;
 	for (i = 0; i < nr; i++) {
 		iobuf = iovec[i];
+		err = setup_kiobuf_bounce_pages(iobuf, GFP_USER);
+		if (err) 
+			goto finished;
+		if (rw & WRITE)
+			kiobuf_copy_bounce(iobuf, COPY_TO_BOUNCE, -1);
+		
 		offset = iobuf->offset;
 		length = iobuf->length;
 		dprintk ("iobuf %d %d %d\n", offset, length, size);
 
 		for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
-			page = iobuf->pagelist[pageind];
-			map  = iobuf->maplist[pageind];
+			map    = iobuf->maplist[pageind];
+			bounce = iobuf->bouncelist[pageind];
+
+			if (bounce)
+				page = bounce;
+			else
+				page = iobuf->pagelist[pageind];
 
 			while (length > 0) {
 				blocknr = b[bufind++];
@@ -1415,6 +1451,9 @@
 
  finished:
 	dprintk ("brw_kiovec: end (%d, %d)\n", transferred, err);
+
+	cleanup_bounce_buffers(rw, nr, iovec, transferred);
+	
 	if (transferred)
 		return transferred;
 	return err;
@@ -1425,6 +1464,9 @@
 	for (i = bhind; --i >= 0; ) {
 		free_async_buffers(bh[bhind]);
 	}
+
+	clear_kiobuf_bounce_pages(iobuf);
+	
 	goto finished;
 }
 
--- linux/fs/iobuf.c.62	Thu Feb  3 19:32:18 2000
+++ linux/fs/iobuf.c	Thu Feb  3 19:32:46 2000
@@ -9,6 +9,7 @@
 #include <linux/iobuf.h>
 #include <linux/malloc.h>
 #include <linux/slab.h>
+#include <linux/bigmem.h>
 
 static kmem_cache_t *kiobuf_cachep;
 
@@ -36,15 +37,31 @@
 		}
 		
 		memset(iobuf, 0, sizeof(*iobuf));
-		iobuf->array_len = KIO_STATIC_PAGES;
-		iobuf->pagelist  = iobuf->page_array;
-		iobuf->maplist   = iobuf->map_array;
+		iobuf->array_len  = KIO_STATIC_PAGES;
+		iobuf->pagelist   = iobuf->page_array;
+		iobuf->maplist    = iobuf->map_array;
+		iobuf->bouncelist = iobuf->bounce_array;
 		*bufp++ = iobuf;
 	}
 	
 	return 0;
 }
 
+void clear_kiobuf_bounce_pages(struct kiobuf *iobuf)
+{
+	int i;
+
+	if (!iobuf->bounced)
+		return;
+	/* Zero slots too: stale entries would be double-freed later */
+	for (i = 0; i < iobuf->nr_pages; i++) {
+		if (iobuf->bouncelist[i])
+			free_page(iobuf->bouncelist[i]);
+		iobuf->bouncelist[i] = 0;
+	}
+	iobuf->bounced = 0;
+}
+
 void free_kiovec(int nr, struct kiobuf **bufp) 
 {
 	struct kiobuf *iobuf;
@@ -52,9 +69,9 @@
 	
 	for (i = 0; i < nr; i++) {
 		iobuf = bufp[i];
+		clear_kiobuf_bounce_pages(iobuf);
 		if (iobuf->array_len > KIO_STATIC_PAGES) {
 			kfree (iobuf->pagelist);
-			kfree (iobuf->maplist);
 		}
 		kmem_cache_free(kiobuf_cachep, bufp[i]);
 	}
@@ -62,45 +79,158 @@
 
 int expand_kiobuf(struct kiobuf *iobuf, int wanted)
 {
-	unsigned long *	pagelist;
+	unsigned long *	pagelist, * bouncelist;
 	struct page ** maplist;
 	
 	if (iobuf->array_len >= wanted)
 		return 0;
-	
+
+	/*
+	 * kmalloc enough space for the page, map and bounce lists all
+	 * at once. 
+	 */
 	pagelist = (unsigned long *) 
-		kmalloc(wanted * sizeof(unsigned long), GFP_KERNEL);
+		kmalloc(3 * wanted * sizeof(unsigned long), GFP_KERNEL);
 	if (!pagelist)
 		return -ENOMEM;
 	
-	maplist = (struct page **) 
-		kmalloc(wanted * sizeof(struct page **), GFP_KERNEL);
-	if (!maplist) {
-		kfree(pagelist);
-		return -ENOMEM;
-	}
-
 	/* Did it grow while we waited? */
 	if (iobuf->array_len >= wanted) {
 		kfree(pagelist);
-		kfree(maplist);
 		return 0;
 	}
-	
+
+	maplist    = (struct page **) (pagelist + wanted);
+	bouncelist = pagelist + 2 * wanted;
+
 	memcpy (pagelist, iobuf->pagelist,
 		iobuf->array_len * sizeof(unsigned long));
 	memcpy (maplist, iobuf->maplist,
 		iobuf->array_len * sizeof(struct page **));
+	memcpy (bouncelist, iobuf->bouncelist,
+		iobuf->array_len * sizeof(unsigned long));
 
 	if (iobuf->array_len > KIO_STATIC_PAGES) {
 		kfree (iobuf->pagelist);
-		kfree (iobuf->maplist);
 	}
 	
-	iobuf->pagelist  = pagelist;
-	iobuf->maplist   = maplist;
-	iobuf->array_len = wanted;
+	iobuf->pagelist   = pagelist;
+	iobuf->maplist    = maplist;
+	iobuf->bouncelist = bouncelist;
+	iobuf->array_len  = wanted;
+	return 0;
+}
+
+/*
+ * Test whether a given page from the bounce buffer matches the given
+ * gfp_mask.  Return true if a bounce buffer is required for this
+ * page. 
+ */
+
+static inline int test_bounce_page(unsigned long page, 
+				   struct page * map,
+				   int gfp_mask)
+{
+	/* Unmapped pages from PCI memory or BIGMEM pages always need a
+	 * bounce buffer unless the caller is prepared to accept
+	 * GFP_BIGMEM pages. */
+	
+	if (!map || PageBIGMEM(map) )
+		/* Careful, the following must return the right value
+		 * even if CONFIG_BIGMEM is not set */
+		return !(gfp_mask & __GFP_BIGMEM);
+	
+	/* A DMA-able page never needs a bounce buffer */
+	if (PageDMA(map))
+		return 0;
+	
+	/* Otherwise it is a non-ISA-DMA-capable page and needs bounce
+	 * buffers if GFP_DMA is requested */
+	return gfp_mask & __GFP_DMA;
+}
+
+int setup_kiobuf_bounce_pages(struct kiobuf *iobuf, int gfp_mask)
+{
+	int i;
+	
+	clear_kiobuf_bounce_pages(iobuf);
+	
+	for (i = 0; i < iobuf->nr_pages; i++) {
+		struct page *map = iobuf->maplist[i];
+		unsigned long page = iobuf->pagelist[i];
+		unsigned long bounce_page;
+		
+		if (!test_bounce_page(page, map, gfp_mask)) {
+			iobuf->bouncelist[i] = 0;
+			continue;
+		}
+		
+		bounce_page = __get_free_page(gfp_mask);
+		if (!bounce_page)
+			goto error;
+
+		iobuf->bouncelist[i] = bounce_page;
+		iobuf->bounced = 1;
+	}
 	return 0;
+	
+ error:
+	clear_kiobuf_bounce_pages(iobuf);
+	return -ENOMEM;
 }
 
+/*
+ * Copy a bounce buffer.  For completion of partially-failed read IOs,
+ * we need to be able to place an upper limit on the data successfully
+ * transferred from bounce buffers to the user's own buffers.  
+ */
 
+void kiobuf_copy_bounce(struct kiobuf *iobuf, int direction, int max)
+{
+	int i;
+	int offset, length;
+	
+	if (!iobuf->bounced)
+		return;
+	
+	offset = iobuf->offset;
+	length = iobuf->length;
+	if (max >= 0 && length > max)
+		length = max;
+	
+	i = 0;
+
+	if (offset >= PAGE_SIZE) {	/* >=: offset==PAGE_SIZE is page 1 */
+		i = (offset >> PAGE_SHIFT);
+		offset &= ~PAGE_MASK;
+	}
+	
+	for (; i < iobuf->nr_pages && length > 0; i++) {
+		unsigned long page = iobuf->pagelist[i];
+		unsigned long bounce_page = iobuf->bouncelist[i];
+		unsigned long kin, kout;
+		/* Clamp to this page even if it is not bounced */
+		int pagelen = length;
+		if (pagelen > PAGE_SIZE - offset)
+			pagelen = PAGE_SIZE - offset;
+		
+		if (bounce_page) {
+			if (direction == COPY_TO_BOUNCE) {
+				kin  = kmap(page, KM_READ);
+				kout = kmap(bounce_page, KM_WRITE);
+			} else {
+				kin  = kmap(bounce_page, KM_READ);
+				kout = kmap(page, KM_WRITE);
+			}
+			
+			memcpy((char *) (kout+offset),
+			       (char *) (kin+offset),
+			       pagelen);
+			kunmap(kout, KM_WRITE);
+			kunmap(kin, KM_READ);
+		}
+		
+		length -= pagelen;
+		offset = 0;
+	}
+}
--- linux/include/linux/iobuf.h.62	Thu Feb  3 19:28:39 2000
+++ linux/include/linux/iobuf.h	Thu Feb  3 19:29:06 2000
@@ -41,12 +41,15 @@
 
 	unsigned long *	pagelist;
 	struct page **	maplist;
+	unsigned long *	bouncelist;
 
 	unsigned int	locked : 1;	/* If set, pages has been locked */
+	unsigned int	bounced : 1;	/* If set, bounce pages are set up */
 	
 	/* Always embed enough struct pages for 64k of IO */
 	unsigned long	page_array[KIO_STATIC_PAGES];
 	struct page *	map_array[KIO_STATIC_PAGES];
+	unsigned long	bounce_array[KIO_STATIC_PAGES];
 };
 
 
@@ -61,6 +64,15 @@
 int	alloc_kiovec(int nr, struct kiobuf **);
 void	free_kiovec(int nr, struct kiobuf **);
 int	expand_kiobuf(struct kiobuf *, int);
+int	setup_kiobuf_bounce_pages(struct kiobuf *, int gfp_mask);
+void	clear_kiobuf_bounce_pages(struct kiobuf *);
+void	kiobuf_copy_bounce(struct kiobuf *, int direction, int max);
+
+/* Direction codes for kiobuf_copy_bounce: */
+enum {
+	COPY_TO_BOUNCE,
+	COPY_FROM_BOUNCE
+};
 
 /* fs/buffer.c */
 
