ia64/xen-unstable

changeset 4736:96530b133124

bitkeeper revision 1.1327.1.23 (4276810fCGJukAImkhaVlxkOSDW-Dg)

Sorry... still one more (Last round of cleanup)
author djm@kirby.fc.hp.com
date Mon May 02 19:35:43 2005 +0000 (2005-05-02)
parents 1d7635794bb8
children ba75d06d25c5
files xen/arch/ia64/patch/linux-2.6.11/swiotlb.c
line diff
     1.1 --- a/xen/arch/ia64/patch/linux-2.6.11/swiotlb.c	Mon May 02 19:09:26 2005 +0000
     1.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/swiotlb.c	Mon May 02 19:35:43 2005 +0000
     1.3 @@ -1,70 +1,22 @@
     1.4 - swiotlb.c |   21 +++++++++++++--------
     1.5 - 1 files changed, 13 insertions(+), 8 deletions(-)
     1.6 -
     1.7 -Index: linux-2.6.11-xendiffs/arch/ia64/lib/swiotlb.c
     1.8 -===================================================================
     1.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/lib/swiotlb.c	2005-04-08 12:13:54.040202667 -0500
    1.10 -+++ linux-2.6.11-xendiffs/arch/ia64/lib/swiotlb.c	2005-04-08 12:19:09.170367318 -0500
    1.11 -@@ -124,8 +124,11 @@ swiotlb_init_with_default_size (size_t d
    1.12 - 	/*
    1.13 - 	 * Get IO TLB memory from the low pages
    1.14 - 	 */
    1.15 --	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
    1.16 --					       (1 << IO_TLB_SHIFT));
    1.17 -+	/* FIXME: Do we really need swiotlb in HV? If all memory trunks
    1.18 -+	 * presented to guest as <4G, are actually <4G in machine range,
    1.19 -+	 * no DMA intervention from HV...
    1.20 -+	 */
    1.21 -+	io_tlb_start = alloc_xenheap_pages(get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
    1.22 - 	if (!io_tlb_start)
    1.23 - 		panic("Cannot allocate SWIOTLB buffer");
    1.24 - 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
    1.25 -@@ -135,16 +138,16 @@ swiotlb_init_with_default_size (size_t d
    1.26 - 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
    1.27 - 	 * between io_tlb_start and io_tlb_end.
    1.28 - 	 */
    1.29 --	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
    1.30 -+	io_tlb_list = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(int)));
    1.31 - 	for (i = 0; i < io_tlb_nslabs; i++)
    1.32 -  		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
    1.33 - 	io_tlb_index = 0;
    1.34 --	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
    1.35 -+	io_tlb_orig_addr = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(char *)));
    1.36 +--- ../../linux-2.6.11/arch/ia64/lib/swiotlb.c	2005-03-02 00:38:17.000000000 -0700
    1.37 ++++ arch/ia64/lib/swiotlb.c	2005-05-02 13:04:15.000000000 -0600
    1.38 +@@ -49,6 +49,15 @@
    1.39 +  */
    1.40 + #define IO_TLB_SHIFT 11
    1.41   
    1.42 - 	/*
    1.43 - 	 * Get the overflow emergency buffer
    1.44 - 	 */
    1.45 --	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
    1.46 -+	io_tlb_overflow_buffer = alloc_xenheap_pages(get_order(io_tlb_overflow));
    1.47 - 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
    1.48 - 	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
    1.49 - }
    1.50 -@@ -328,13 +331,13 @@ swiotlb_alloc_coherent(struct device *hw
    1.51 - 	 */
    1.52 - 	flags |= GFP_DMA;
    1.53 ++#ifdef XEN
    1.54 ++#define __order_to_size(_order) (1 << (_order+PAGE_SHIFT))
    1.55 ++#define alloc_bootmem_low_pages(_x) alloc_xenheap_pages(get_order(_x))
    1.56 ++#define alloc_bootmem_low(_x) alloc_xenheap_pages(get_order(_x))
    1.57 ++#define alloc_bootmem(_x) alloc_xenheap_pages(get_order(_x))
    1.58 ++#define __get_free_pages(_x,_y) alloc_xenheap_pages(__order_to_size(_y))
    1.59 ++#define free_pages(_x,_y) free_xenheap_pages(_x,_y)
    1.60 ++#endif
    1.61 ++
    1.62 + int swiotlb_force;
    1.63   
    1.64 --	ret = (void *)__get_free_pages(flags, order);
    1.65 -+	ret = (void *)alloc_xenheap_pages(get_order(size));
    1.66 - 	if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
    1.67 - 		/*
    1.68 - 		 * The allocated memory isn't reachable by the device.
    1.69 - 		 * Fall back on swiotlb_map_single().
    1.70 - 		 */
    1.71 --		free_pages((unsigned long) ret, order);
    1.72 -+		free_xenheap_pages((unsigned long) ret, order);
    1.73 - 		ret = NULL;
    1.74 - 	}
    1.75 - 	if (!ret) {
    1.76 -@@ -372,7 +375,7 @@ swiotlb_free_coherent(struct device *hwd
    1.77 - {
    1.78 - 	if (!(vaddr >= (void *)io_tlb_start
    1.79 -                     && vaddr < (void *)io_tlb_end))
    1.80 --		free_pages((unsigned long) vaddr, get_order(size));
    1.81 -+		free_xenheap_pages((unsigned long) vaddr, get_order(size));
    1.82 - 	else
    1.83 - 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
    1.84 - 		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
    1.85 -@@ -388,8 +391,10 @@ swiotlb_full(struct device *dev, size_t 
    1.86 + /*
    1.87 +@@ -388,8 +397,10 @@
    1.88   	 * When the mapping is small enough return a static buffer to limit
    1.89   	 * the damage, or panic when the transfer is too big.
    1.90   	 */