direct-io.hg

changeset 15098:a08261650b8a

[IA64] Paravirtualize sba_iommu

This paravirtualizes the iommu driver for HP Integrity systems, allowing
dom0 to use the iommu just as it would on native hardware.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author Alex Williamson <alex.williamson@hp.com>
date Mon May 07 08:45:15 2007 -0600 (2007-05-07)
parents d20e4f3158c7
children 3b3f00d42dc4
files linux-2.6-xen-sparse/arch/ia64/hp/common/sba_iommu.c
line diff
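
The core of the change is swapping virt_to_phys()/phys_to_virt() for
virt_to_bus()/bus_to_virt() wherever an address is handed to (or read back
from) the IOC hardware: under Xen, virt_to_phys() yields only a
pseudo-physical address, which the device cannot use for DMA. As a rough
illustration, here is a minimal sketch of the translation, following the
shape of the x86 sparse tree's maddr.h; pfn_to_mfn() and the exact ia64
p2m lookup are assumptions here, not code from this patch:

    #include <linux/types.h>
    #include <asm/page.h>
    #include <asm/io.h>		/* virt_to_phys() */
    #include <asm/maddr.h>	/* pfn_to_mfn() in the sparse tree */

    /* Sketch: turn a kernel virtual address into a bus (machine) address. */
    static inline unsigned long sketch_virt_to_bus(void *virt)
    {
    	unsigned long phys = virt_to_phys(virt);	    /* pseudo-physical */
    	unsigned long mfn = pfn_to_mfn(phys >> PAGE_SHIFT); /* p2m lookup */

    	/* machine frame plus the offset within the page */
    	return (mfn << PAGE_SHIFT) | (phys & ~PAGE_MASK);
    }
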
--- a/linux-2.6-xen-sparse/arch/ia64/hp/common/sba_iommu.c	Mon May 07 08:41:50 2007 -0600
+++ b/linux-2.6-xen-sparse/arch/ia64/hp/common/sba_iommu.c	Mon May 07 08:45:15 2007 -0600
@@ -763,13 +763,14 @@ sba_free_range(struct ioc *ioc, dma_addr
  */
 
 #if 1
-#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
-						      | 0x8000000000000000ULL)
+#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr =	\
+	((virt_to_bus((void *)vba) & ~0xFFFULL) | 0x8000000000000000ULL)
 #else
 void SBA_INLINE
 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
 {
-	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
+	*pdir_ptr = ((virt_to_bus((void *)vba) & ~0xFFFULL) |
+		    0x80000000000000FFULL);
 }
 #endif
 
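A note on the hunk above: natively, sba_io_pdir_entry() could build the
I/O pdir entry by simply masking the region bits off the kernel virtual
address, because ia64 region-7 addresses are identity-mapped onto physical
memory. Under Xen that identity only reaches a pseudo-physical address, so
the entry must be built from virt_to_bus() instead. A sketch of the entry
the macro produces (my annotation, not patch code):

    /* bit 63 = valid; low bits hold the 4K-aligned machine address */
    u64 entry = (virt_to_bus((void *)vba) & ~0xFFFULL)	/* machine addr */
    	      | 0x8000000000000000ULL;			/* valid bit */
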
@@ -784,6 +785,12 @@ mark_clean (void *addr, size_t size)
 {
 	unsigned long pg_addr, end;
 
+#ifdef CONFIG_XEN
+	/* XXX: Bad things happen starting domUs when this is enabled. */
+	if (is_running_on_xen())
+		return;
+#endif
+
 	pg_addr = PAGE_ALIGN((unsigned long) addr);
 	end = (unsigned long) addr + size;
 	while (pg_addr + PAGE_SIZE <= end) {
@@ -894,15 +901,14 @@ sba_map_single(struct device *dev, void 
 	unsigned long flags;
 #endif
 #ifdef ALLOW_IOV_BYPASS
-	unsigned long pci_addr = virt_to_phys(addr);
-#endif
+	unsigned long pci_addr = virt_to_bus(addr);
 
-#ifdef ALLOW_IOV_BYPASS
 	ASSERT(to_pci_dev(dev)->dma_mask);
 	/*
 	** Check if the PCI device can DMA to ptr... if so, just return ptr
 	*/
-	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
+	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0 &&
+		   !range_straddles_page_boundary(addr, size))) {
 		/*
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
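The extra bypass condition exists because a Xen guest sees pseudo-physical
memory: a buffer that is contiguous in pseudo-physical space may be
scattered across machine memory, so handing the device a bare bus address
is only safe when the buffer cannot cross a page boundary. A conservative
sketch of the check (the real helper in the sparse tree may also consult
Xen's contiguity information; this version is an assumption, not the
patch's code):

    #include <linux/types.h>
    #include <asm/page.h>

    /* Sketch: true if [p, p + size) spans more than one page and so
     * might not be machine-contiguous under Xen. */
    static int sketch_range_straddles_page_boundary(void *p, size_t size)
    {
    	unsigned long offset = (unsigned long)p & ~PAGE_MASK;

    	return offset + size > PAGE_SIZE;
    }
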
@@ -973,13 +979,13 @@ sba_mark_clean(struct ioc *ioc, dma_addr
 	void	*addr;
 
 	if (size <= iovp_size) {
-		addr = phys_to_virt(ioc->pdir_base[off] &
-		                    ~0xE000000000000FFFULL);
+		addr = bus_to_virt(ioc->pdir_base[off] &
+				   ~0xE000000000000FFFULL);
 		mark_clean(addr, size);
 	} else {
 		do {
-			addr = phys_to_virt(ioc->pdir_base[off] &
-			                    ~0xE000000000000FFFULL);
+			addr = bus_to_virt(ioc->pdir_base[off] &
+					   ~0xE000000000000FFFULL);
 			mark_clean(addr, min(size, iovp_size));
 			off++;
 			size -= iovp_size;
@@ -1018,7 +1024,7 @@ void sba_unmap_single(struct device *dev
 
 #ifdef ENABLE_MARK_CLEAN
 		if (dir == DMA_FROM_DEVICE) {
-			mark_clean(phys_to_virt(iova), size);
+			mark_clean(bus_to_virt(iova), size);
 		}
 #endif
 		return;
@@ -1102,9 +1108,14 @@ sba_alloc_coherent (struct device *dev, 
 		return NULL;
 
 	memset(addr, 0, size);
-	*dma_handle = virt_to_phys(addr);
 
 #ifdef ALLOW_IOV_BYPASS
+#ifdef CONFIG_XEN
+	if (xen_create_contiguous_region((unsigned long)addr, get_order(size),
+					 fls64(dev->coherent_dma_mask)))
+		goto iommu_map;
+#endif
+	*dma_handle = virt_to_bus(addr);
 	ASSERT(dev->coherent_dma_mask);
 	/*
 	** Check if the PCI device can DMA to ptr... if so, just return ptr
@@ -1115,6 +1126,9 @@ sba_alloc_coherent (struct device *dev, 
 
 		return addr;
 	}
+#ifdef CONFIG_XEN
+iommu_map:
+#endif
 #endif
 
 	/*
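In sba_alloc_coherent(), the bypass path can no longer assume freshly
allocated pages are machine-contiguous, so the patch first asks Xen to
exchange them for contiguous machine pages the device can address.
xen_create_contiguous_region() returns 0 on success; fls64() of the
coherent DMA mask gives the number of address bits the exchanged pages
must fit under. On failure, the code falls through to the normal IOMMU
mapping path via the new iommu_map label. Roughly (my comments):

    /* Try to back addr with machine-contiguous pages below
     * 2^fls64(dev->coherent_dma_mask); fall back to the IOMMU if Xen
     * cannot satisfy the exchange. */
    if (xen_create_contiguous_region((unsigned long)addr, get_order(size),
    				     fls64(dev->coherent_dma_mask)))
    	goto iommu_map;
    *dma_handle = virt_to_bus(addr);	/* now a usable machine address */
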
@@ -1138,6 +1152,13 @@ sba_alloc_coherent (struct device *dev, 
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
+#if defined(ALLOW_IOV_BYPASS) && defined(CONFIG_XEN)
+	struct ioc *ioc = GET_IOC(dev);
+
+	if (likely((dma_handle & ioc->imask) != ioc->ibase))
+		xen_destroy_contiguous_region((unsigned long)vaddr,
+					      get_order(size));
+#endif
 	sba_unmap_single(dev, dma_handle, size, 0);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
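The matching teardown in sba_free_coherent() has to work out which path
produced the buffer. A handle inside the IOC's IOVA window came from the
IOMMU mapping path; anything outside it was a Xen-contiguous bypass
allocation whose machine pages must be handed back before the memory is
freed. Sketched as a classification (my comments, not patch code):

    if ((dma_handle & ioc->imask) == ioc->ibase) {
    	/* IOMMU-mapped: sba_unmap_single() below releases the
    	 * pdir entries, nothing extra to do here. */
    } else {
    	/* bypass: give the machine-contiguous pages back to Xen */
    	xen_destroy_contiguous_region((unsigned long)vaddr,
    				      get_order(size));
    }
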
@@ -1406,7 +1427,7 @@ int sba_map_sg(struct device *dev, struc
 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
 		for (sg = sglist ; filled < nents ; filled++, sg++){
 			sg->dma_length = sg->length;
-			sg->dma_address = virt_to_phys(sba_sg_address(sg));
+			sg->dma_address = virt_to_bus(sba_sg_address(sg));
 		}
 		return filled;
 	}
@@ -1560,13 +1581,19 @@ ioc_iova_init(struct ioc *ioc)
 	if (!ioc->pdir_base)
 		panic(PFX "Couldn't allocate I/O Page Table\n");
 
+#ifdef CONFIG_XEN
+	/* The page table needs to be pinned in Xen memory */
+	if (xen_create_contiguous_region((unsigned long)ioc->pdir_base,
+					 get_order(ioc->pdir_size), 0))
+		panic(PFX "Couldn't contiguously map I/O Page Table\n");
+#endif
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
 	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
 
 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
-	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
+	WRITE_REG(virt_to_bus(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
 
 	/*
 	** If an AGP device is present, only use half of the IOV space
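ioc_iova_init() programs the IOC with a single base address for the I/O
page table, and the hardware then walks the table by machine address, so
the table itself must be machine-contiguous. Passing 0 as the address_bits
argument asks only for contiguity, with no restriction on where in machine
memory the pages land (my comments on the hunk above):

    /* Pin the I/O page table contiguously in machine memory; 0 means
     * no addressing limit, contiguity is the only requirement. */
    if (xen_create_contiguous_region((unsigned long)ioc->pdir_base,
    				     get_order(ioc->pdir_size), 0))
    	panic(PFX "Couldn't contiguously map I/O Page Table\n");

The same reasoning applies to the final hunk below: the prefetch spill
page is read by the IOC directly, so the address handed to the hardware
must likewise be a bus address.
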
@@ -1603,7 +1630,7 @@ ioc_iova_init(struct ioc *ioc)
 		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
 			memcpy(poison_addr, spill_poison, poison_size);
 
-		prefetch_spill_page = virt_to_phys(addr);
+		prefetch_spill_page = virt_to_bus(addr);
 
 		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
 	}