ia64/xen-unstable
changeset 6346:b09f13ddd27b
Merge x86_64 and i386 ioremap.c.
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Tue Aug 23 13:30:40 2005 +0000 (2005-08-23)
parents   0b5ee83ea35c
children  fa688e7baab9
files     linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
          linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile
          linux-2.6-xen-sparse/include/asm-xen/asm-i386/io.h
line diff
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Tue Aug 23 13:13:39 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Tue Aug 23 13:30:40 2005 +0000
@@ -36,6 +36,8 @@ void iounmap(volatile void __iomem *addr
 {
 }
 
+#ifdef __i386__
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
         return NULL;
@@ -45,6 +47,8 @@ void __init bt_iounmap(void *addr, unsig
 {
 }
 
+#endif /* __i386__ */
+
 #else
 
 /*
@@ -126,10 +130,12 @@ void __iomem * __ioremap(unsigned long p
                 return NULL;
         area->phys_addr = phys_addr;
         addr = (void __iomem *) area->addr;
+        flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+#ifdef __x86_64__
+        flags |= _PAGE_USER;
+#endif
         if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
-                                    size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-                                                   _PAGE_DIRTY | _PAGE_ACCESSED
-                                                   | flags), domid)) {
+                                    size, __pgprot(flags), domid)) {
                 vunmap((void __force *) addr);
                 return NULL;
         }
@@ -218,6 +224,8 @@ void iounmap(volatile void __iomem *addr
         kfree(p);
 }
 
+#ifdef __i386__
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
         unsigned long offset, last_addr;
@@ -289,6 +297,8 @@ void __init bt_iounmap(void *addr, unsig
         }
 }
 
+#endif /* __i386__ */
+
 #endif /* CONFIG_XEN_PHYSDEV_ACCESS */
 
 /* These hacky macros avoid phys->machine translations. */
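The functional change in the shared file is in __ioremap(): the page-protection bits are now composed into a local flags word, and _PAGE_USER is ORed in only when the file is compiled for x86_64 (matching what the deleted x86_64 copy hard-coded). A minimal user-space sketch of that composition pattern follows; the _PAGE_* values are the usual x86 PTE bits, but the helper name ioremap_prot() and the standalone framing are illustrative, not from the kernel:

/* Sketch: per-arch pgprot composition as in the merged __ioremap().
 * The _PAGE_* values are the standard x86 PTE bits; ioremap_prot()
 * is a hypothetical helper, not a kernel function. */
#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL  /* ORed in only on x86_64 */
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

static unsigned long ioremap_prot(unsigned long flags, int is_x86_64)
{
        /* Bits every ioremap mapping gets on both architectures. */
        flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
        /* The x86_64 build additionally marks the mapping _PAGE_USER. */
        if (is_x86_64)
                flags |= _PAGE_USER;
        return flags;
}

int main(void)
{
        printf("i386:   %#lx\n", ioremap_prot(0, 0));
        printf("x86_64: %#lx\n", ioremap_prot(0, 1));
        return 0;
}

Because the two architectures want different bit sets, the old single __pgprot(...) expression could not be shared unchanged; building flags up first keeps the arch difference to one small #ifdef.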
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile	Tue Aug 23 13:13:39 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile	Tue Aug 23 13:30:40 2005 +0000
@@ -6,10 +6,10 @@ XENARCH := $(subst ",,$(CONFIG_XENARCH))
 
 CFLAGS += -Iarch/$(XENARCH)/mm
 
-obj-y := init.o fault.o ioremap.o pageattr.o
+obj-y := init.o fault.o pageattr.o
 c-obj-y := extable.o
 
-i386-obj-y := hypervisor.o
+i386-obj-y := hypervisor.o ioremap.o
 
 #obj-y := init.o fault.o ioremap.o extable.o pageattr.o
 #c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
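The build change mirrors the file merge: ioremap.o leaves the x86_64-local obj-y list and joins i386-obj-y next to hypervisor.o, so the x86_64 kernel is now built from the shared arch/xen/i386/mm/ioremap.c rather than from its own copy. The i386-obj-y machinery elsewhere in this Makefile already pulls hypervisor.o across from the i386 tree; the exact link/compile rule is not shown in this hunk.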
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/ioremap.c	Tue Aug 23 13:13:39 2005 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,489 +0,0 @@
-/*
- * arch/x86_64/mm/ioremap.c
- *
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-/*
- * Reuse arch/xen/i396/mm/ioremap.c. Need to merge later
- */
-#ifndef CONFIG_XEN_PHYSDEV_ACCESS
-
-void * __ioremap(unsigned long phys_addr, unsigned long size,
-                 unsigned long flags)
-{
-        return NULL;
-}
-
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-        return NULL;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-}
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-        return NULL;
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-}
-
-#else
-
-/*
- * Does @address reside within a non-highmem page that is local to this virtual
- * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
- * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
- * why this works.
- */
-static inline int is_local_lowmem(unsigned long address)
-{
-        extern unsigned long max_low_pfn;
-        unsigned long mfn = address >> PAGE_SHIFT;
-        unsigned long pfn = mfn_to_pfn(mfn);
-        return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
-}
-
-#endif
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
-        void __iomem * addr;
-        struct vm_struct * area;
-        unsigned long offset, last_addr;
-        domid_t domid = DOMID_IO;
-
-        /* Don't allow wraparound or zero size */
-        last_addr = phys_addr + size - 1;
-        if (!size || last_addr < phys_addr)
-                return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-        /*
-         * Don't remap the low PCI/ISA area, it's always mapped..
-         */
-        if (phys_addr >= 0x0 && last_addr < 0x100000)
-                return isa_bus_to_virt(phys_addr);
-#endif
-
-        /*
-         * Don't allow anybody to remap normal RAM that we're using..
-         */
-        if (is_local_lowmem(phys_addr)) {
-                char *t_addr, *t_end;
-                struct page *page;
-
-                t_addr = bus_to_virt(phys_addr);
-                t_end = t_addr + (size - 1);
-
-                for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                        if(!PageReserved(page))
-                                return NULL;
-
-                domid = DOMID_LOCAL;
-        }
-
-        /*
-         * Mappings have to be page-aligned
-         */
-        offset = phys_addr & ~PAGE_MASK;
-        phys_addr &= PAGE_MASK;
-        size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-        /*
-         * Ok, go for it..
-         */
-        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-        if (!area)
-                return NULL;
-        area->phys_addr = phys_addr;
-        addr = (void __iomem *) area->addr;
-        if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
-                                    size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-                                                   _PAGE_DIRTY | _PAGE_ACCESSED
-                                                   | _PAGE_USER
-                                                   | flags), domid)) {
-                vunmap((void __force *) addr);
-                return NULL;
-        }
-        return (void __iomem *) (offset + (char __iomem *)addr);
-}
-
-
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- *
- * Must be freed with iounmap.
- */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-        unsigned long last_addr;
-        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
-        if (!p)
-                return p;
-
-        /* Guaranteed to be > phys_addr, as per __ioremap() */
-        last_addr = phys_addr + size - 1;
-
-        if (is_local_lowmem(last_addr)) {
-                struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-                unsigned long npages;
-
-                phys_addr &= PAGE_MASK;
-
-                /* This might overflow and become zero.. */
-                last_addr = PAGE_ALIGN(last_addr);
-
-                /* .. but that's ok, because modulo-2**n arithmetic will make
-                 * the page-aligned "last - first" come out right.
-                 */
-                npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-
-                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
-                        iounmap(p);
-                        p = NULL;
-                }
-                global_flush_tlb();
-        }
-
-        return p;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-        struct vm_struct *p;
-        if ((void __force *) addr <= high_memory)
-                return;
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-        if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-                return;
-#endif
-        p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
-        if (!p) {
-                printk("__iounmap: bad address %p\n", addr);
-                return;
-        }
-
-        if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
-                /* p->size includes the guard page, but cpa doesn't like that */
-                change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-                                 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-                                 PAGE_KERNEL);
-                global_flush_tlb();
-        }
-        kfree(p);
-}
-
-#if defined(__i386__)
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-        unsigned long offset, last_addr;
-        unsigned int nrpages;
-        enum fixed_addresses idx;
-
-        /* Don't allow wraparound or zero size */
-        last_addr = phys_addr + size - 1;
-        if (!size || last_addr < phys_addr)
-                return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-        /*
-         * Don't remap the low PCI/ISA area, it's always mapped..
-         */
-        if (phys_addr >= 0x0 && last_addr < 0x100000)
-                return isa_bus_to_virt(phys_addr);
-#endif
-
-        /*
-         * Mappings have to be page-aligned
-         */
-        offset = phys_addr & ~PAGE_MASK;
-        phys_addr &= PAGE_MASK;
-        size = PAGE_ALIGN(last_addr) - phys_addr;
-
-        /*
-         * Mappings have to fit in the FIX_BTMAP area.
-         */
-        nrpages = size >> PAGE_SHIFT;
-        if (nrpages > NR_FIX_BTMAPS)
-                return NULL;
-
-        /*
-         * Ok, go for it..
-         */
-        idx = FIX_BTMAP_BEGIN;
-        while (nrpages > 0) {
-                set_fixmap(idx, phys_addr);
-                phys_addr += PAGE_SIZE;
-                --idx;
-                --nrpages;
-        }
-        return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-        unsigned long virt_addr;
-        unsigned long offset;
-        unsigned int nrpages;
-        enum fixed_addresses idx;
-
-        virt_addr = (unsigned long)addr;
-        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-                return;
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-        if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-                return;
-#endif
-        offset = virt_addr & ~PAGE_MASK;
-        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-
-        idx = FIX_BTMAP_BEGIN;
-        while (nrpages > 0) {
-                clear_fixmap(idx);
-                --idx;
-                --nrpages;
-        }
-}
-#endif /* defined(__i386__) */
-
-#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
-
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
-  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
-  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline void direct_remap_area_pte(pte_t *pte,
-                                         unsigned long address,
-                                         unsigned long size,
-                                         mmu_update_t **v)
-{
-        unsigned long end;
-
-        address &= ~PMD_MASK;
-        end = address + size;
-        if (end > PMD_SIZE)
-                end = PMD_SIZE;
-        if (address >= end)
-                BUG();
-
-        do {
-                (*v)->ptr = virt_to_machine(pte);
-                (*v)++;
-                address += PAGE_SIZE;
-                pte++;
-        } while (address && (address < end));
-}
-
-static inline int direct_remap_area_pmd(struct mm_struct *mm,
-                                        pmd_t *pmd,
-                                        unsigned long address,
-                                        unsigned long size,
-                                        mmu_update_t **v)
-{
-        unsigned long end;
-
-        address &= ~PGDIR_MASK;
-        end = address + size;
-        if (end > PGDIR_SIZE)
-                end = PGDIR_SIZE;
-        if (address >= end)
-                BUG();
-        do {
-                pte_t *pte = (mm == &init_mm) ?
-                        pte_alloc_kernel(mm, pmd, address) :
-                        pte_alloc_map(mm, pmd, address);
-                if (!pte)
-                        return -ENOMEM;
-                direct_remap_area_pte(pte, address, end - address, v);
-                pte_unmap(pte);
-                address = (address + PMD_SIZE) & PMD_MASK;
-                pmd++;
-        } while (address && (address < end));
-        return 0;
-}
-
-int __direct_remap_area_pages(struct mm_struct *mm,
-                              unsigned long address,
-                              unsigned long size,
-                              mmu_update_t *v)
-{
-        pgd_t * dir;
-        unsigned long end = address + size;
-        int error;
-
-#if defined(__i386__)
-        dir = pgd_offset(mm, address);
-#elif defined (__x86_64)
-        dir = (mm == &init_mm) ?
-                pgd_offset_k(address):
-                pgd_offset(mm, address);
-#endif
-        if (address >= end)
-                BUG();
-        spin_lock(&mm->page_table_lock);
-        do {
-                pud_t *pud;
-                pmd_t *pmd;
-
-                error = -ENOMEM;
-                pud = pud_alloc(mm, dir, address);
-                if (!pud)
-                        break;
-                pmd = pmd_alloc(mm, pud, address);
-                if (!pmd)
-                        break;
-                error = 0;
-                direct_remap_area_pmd(mm, pmd, address, end - address, &v);
-                address = (address + PGDIR_SIZE) & PGDIR_MASK;
-                dir++;
-
-        } while (address && (address < end));
-        spin_unlock(&mm->page_table_lock);
-        return error;
-}
-
-
-int direct_remap_area_pages(struct mm_struct *mm,
-                            unsigned long address,
-                            unsigned long machine_addr,
-                            unsigned long size,
-                            pgprot_t prot,
-                            domid_t domid)
-{
-        int i;
-        unsigned long start_address;
-#define MAX_DIRECTMAP_MMU_QUEUE 130
-        mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;
-
-        start_address = address;
-
-        flush_cache_all();
-
-        for (i = 0; i < size; i += PAGE_SIZE) {
-                if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
-                        /* Fill in the PTE pointers. */
-                        __direct_remap_area_pages(mm,
-                                                  start_address,
-                                                  address-start_address,
-                                                  u);
-
-                        if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-                                return -EFAULT;
-                        v = u;
-                        start_address = address;
-                }
-
-                /*
-                 * Fill in the machine address: PTE ptr is done later by
-                 * __direct_remap_area_pages().
-                 */
-                v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
-
-                machine_addr += PAGE_SIZE;
-                address += PAGE_SIZE;
-                v++;
-        }
-
-        if (v != u) {
-                /* get the ptep's filled in */
-                __direct_remap_area_pages(mm,
-                                          start_address,
-                                          address-start_address,
-                                          u);
-                if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-                        return -EFAULT;
-        }
-
-        flush_tlb_all();
-
-        return 0;
-}
-
-EXPORT_SYMBOL(direct_remap_area_pages);
-
-static int lookup_pte_fn(
-        pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-{
-        unsigned long *ptep = (unsigned long *)data;
-        if (ptep) *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT)
-                  | ((unsigned long)pte & ~PAGE_MASK);
-        return 0;
-}
-
-int create_lookup_pte_addr(struct mm_struct *mm,
-                           unsigned long address,
-                           unsigned long *ptep)
-{
-        return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
-}
-
-EXPORT_SYMBOL(create_lookup_pte_addr);
-
-static int noop_fn(
-        pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-{
-        return 0;
-}
-
-int touch_pte_range(struct mm_struct *mm,
-                    unsigned long address,
-                    unsigned long size)
-{
-        return generic_page_range(mm, address, size, noop_fn, NULL);
-}
-
-EXPORT_SYMBOL(touch_pte_range);
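Everything deleted here survives verbatim in the shared arch/xen/i386/mm/ioremap.c. The most Xen-specific piece is direct_remap_area_pages(): PTE updates are queued into a fixed 130-entry mmu_update_t array and each full batch is handed to the hypervisor in a single HYPERVISOR_mmu_update() hypercall, amortizing the guest-to-hypervisor transition over many page-table writes. Below is a standalone sketch of that queue-and-flush loop; the struct shape and queue size mirror the deleted code, while flush_batch() is a hypothetical stand-in for the hypercall:

/* Sketch: batch-and-flush pattern from direct_remap_area_pages().
 * flush_batch() is a stand-in for HYPERVISOR_mmu_update(); in the real
 * code, __direct_remap_area_pages() also fills each entry's ptr field
 * with the machine address of the PTE before the flush. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define MAX_DIRECTMAP_MMU_QUEUE 130

typedef struct { unsigned long ptr, val; } mmu_update_t;

static int flush_batch(const mmu_update_t *u, size_t count)
{
        /* Stand-in for HYPERVISOR_mmu_update(u, count, NULL, domid). */
        printf("flushing %zu queued PTE updates\n", count);
        return 0;
}

static int remap_pages(unsigned long machine_addr, unsigned long size)
{
        mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;
        unsigned long i;

        for (i = 0; i < size; i += PAGE_SIZE) {
                if (v - u == MAX_DIRECTMAP_MMU_QUEUE) {
                        if (flush_batch(u, v - u) < 0)
                                return -1;
                        v = u;  /* queue drained; start refilling */
                }
                v->ptr = 0;  /* PTE machine address: filled in later in the real code */
                v->val = machine_addr + i;  /* new PTE contents for this page */
                v++;
        }
        /* Flush whatever remains in a final, partial batch. */
        if (v != u && flush_batch(u, v - u) < 0)
                return -1;
        return 0;
}

int main(void)
{
        /* 300 pages: two full batches of 130, then a final batch of 40. */
        return remap_pages(0x100000UL, 300 * PAGE_SIZE);
}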