ia64/xen-unstable
changeset 4073:fbad927100bc
bitkeeper revision 1.1236.1.74 (42306a34dSgXLgqE_cz8DtOHx4cK0w)
Merge centipede.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into centipede.cl.cam.ac.uk:/local/scratch/cwc22/Xen-Builds/2005-02-24/xen-unstable.bk
author    cwc22@centipede.cl.cam.ac.uk
date      Thu Mar 10 15:39:32 2005 +0000 (2005-03-10)
parents   68d9d54ac50f cae72af328e5
children  afa2f6b47e70
files     .rootkeys linux-2.4.29-xen-sparse/include/linux/highmem.h linux-2.4.29-xen-sparse/mm/highmem.c linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c linux-2.6.10-xen-sparse/include/linux/highmem.h linux-2.6.10-xen-sparse/mm/highmem.c tools/libxc/xc_linux_restore.c tools/libxutil/file_stream.c tools/xfrd/xfrd.c
line diff
--- a/.rootkeys  Thu Mar 10 14:05:30 2005 +0000
+++ b/.rootkeys  Thu Mar 10 15:39:32 2005 +0000
@@ -117,6 +117,7 @@ 3e5a4e68mTr0zcp9SXDbnd-XLrrfxw linux-2.4
 3f1056a9L_kqHcFheV00KbKBzv9j5w linux-2.4.29-xen-sparse/include/asm-xen/vga.h
 40659defgWA92arexpMGn8X3QMDj3w linux-2.4.29-xen-sparse/include/asm-xen/xor.h
 3f056927gMHl7mWB89rb73JahbhQIA linux-2.4.29-xen-sparse/include/linux/blk.h
+42305f54mFScQCttpj57EIm60BnxIg linux-2.4.29-xen-sparse/include/linux/highmem.h
 419e0488SBzS3mdUhwgsES5a5e3abA linux-2.4.29-xen-sparse/include/linux/irq.h
 4124f66fPHG6yvB_vXmesjvzrJ3yMg linux-2.4.29-xen-sparse/include/linux/mm.h
 401c0590D_kwJDU59X8NyvqSv_Cl2A linux-2.4.29-xen-sparse/include/linux/sched.h
@@ -269,10 +270,12 @@ 40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6
 4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.10-xen-sparse/include/asm-xen/queues.h
 3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.10-xen-sparse/include/asm-xen/xen_proc.h
 419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.10-xen-sparse/include/linux/gfp.h
+42305f545Vc5SLCUewZ2-n-P9JJhEQ linux-2.6.10-xen-sparse/include/linux/highmem.h
 419dfc609zbti8rqL60tL2dHXQ_rvQ linux-2.6.10-xen-sparse/include/linux/irq.h
 4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.10-xen-sparse/include/linux/skbuff.h
 419dfc6awx7w88wk6cG9P3mPidX6LQ linux-2.6.10-xen-sparse/kernel/irq/manage.c
 40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.10-xen-sparse/mkbuildtree
+42305f54Q6xJ1bXcQJlCQq1m-e2C8g linux-2.6.10-xen-sparse/mm/highmem.c
 412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.10-xen-sparse/mm/memory.c
 410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.10-xen-sparse/mm/page_alloc.c
 41505c572m-s9ATiO1LiD1GPznTTIg linux-2.6.10-xen-sparse/net/core/skbuff.c
2.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 2.2 +++ b/linux-2.4.29-xen-sparse/include/linux/highmem.h Thu Mar 10 15:39:32 2005 +0000 2.3 @@ -0,0 +1,137 @@ 2.4 +#ifndef _LINUX_HIGHMEM_H 2.5 +#define _LINUX_HIGHMEM_H 2.6 + 2.7 +#include <linux/config.h> 2.8 +#include <asm/pgalloc.h> 2.9 + 2.10 +#ifdef CONFIG_HIGHMEM 2.11 + 2.12 +extern struct page *highmem_start_page; 2.13 + 2.14 +#include <asm/highmem.h> 2.15 + 2.16 +/* declarations for linux/mm/highmem.c */ 2.17 +unsigned int nr_free_highpages(void); 2.18 +void kmap_flush_unused(void); 2.19 + 2.20 +extern struct buffer_head *create_bounce(int rw, struct buffer_head * bh_orig); 2.21 + 2.22 +static inline char *bh_kmap(struct buffer_head *bh) 2.23 +{ 2.24 + return kmap(bh->b_page) + bh_offset(bh); 2.25 +} 2.26 + 2.27 +static inline void bh_kunmap(struct buffer_head *bh) 2.28 +{ 2.29 + kunmap(bh->b_page); 2.30 +} 2.31 + 2.32 +/* 2.33 + * remember to add offset! and never ever reenable interrupts between a 2.34 + * bh_kmap_irq and bh_kunmap_irq!! 2.35 + */ 2.36 +static inline char *bh_kmap_irq(struct buffer_head *bh, unsigned long *flags) 2.37 +{ 2.38 + unsigned long addr; 2.39 + 2.40 + __save_flags(*flags); 2.41 + 2.42 + /* 2.43 + * could be low 2.44 + */ 2.45 + if (!PageHighMem(bh->b_page)) 2.46 + return bh->b_data; 2.47 + 2.48 + /* 2.49 + * it's a highmem page 2.50 + */ 2.51 + __cli(); 2.52 + addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ); 2.53 + 2.54 + if (addr & ~PAGE_MASK) 2.55 + BUG(); 2.56 + 2.57 + return (char *) addr + bh_offset(bh); 2.58 +} 2.59 + 2.60 +static inline void bh_kunmap_irq(char *buffer, unsigned long *flags) 2.61 +{ 2.62 + unsigned long ptr = (unsigned long) buffer & PAGE_MASK; 2.63 + 2.64 + kunmap_atomic((void *) ptr, KM_BH_IRQ); 2.65 + __restore_flags(*flags); 2.66 +} 2.67 + 2.68 +#else /* CONFIG_HIGHMEM */ 2.69 + 2.70 +static inline unsigned int nr_free_highpages(void) { return 0; } 2.71 +static inline void kmap_flush_unused(void) { } 2.72 + 2.73 +static inline void *kmap(struct page *page) { return page_address(page); } 2.74 + 2.75 +#define kunmap(page) do { } while (0) 2.76 + 2.77 +#define kmap_atomic(page,idx) kmap(page) 2.78 +#define kunmap_atomic(page,idx) kunmap(page) 2.79 + 2.80 +#define bh_kmap(bh) ((bh)->b_data) 2.81 +#define bh_kunmap(bh) do { } while (0) 2.82 +#define kmap_nonblock(page) kmap(page) 2.83 +#define bh_kmap_irq(bh, flags) ((bh)->b_data) 2.84 +#define bh_kunmap_irq(bh, flags) do { *(flags) = 0; } while (0) 2.85 + 2.86 +#endif /* CONFIG_HIGHMEM */ 2.87 + 2.88 +/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 2.89 +static inline void clear_user_highpage(struct page *page, unsigned long vaddr) 2.90 +{ 2.91 + void *addr = kmap_atomic(page, KM_USER0); 2.92 + clear_user_page(addr, vaddr); 2.93 + kunmap_atomic(addr, KM_USER0); 2.94 +} 2.95 + 2.96 +static inline void clear_highpage(struct page *page) 2.97 +{ 2.98 + clear_page(kmap(page)); 2.99 + kunmap(page); 2.100 +} 2.101 + 2.102 +/* 2.103 + * Same but also flushes aliased cache contents to RAM. 
2.104 + */ 2.105 +static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) 2.106 +{ 2.107 + char *kaddr; 2.108 + 2.109 + if (offset + size > PAGE_SIZE) 2.110 + out_of_line_bug(); 2.111 + kaddr = kmap(page); 2.112 + memset(kaddr + offset, 0, size); 2.113 + flush_dcache_page(page); 2.114 + flush_page_to_ram(page); 2.115 + kunmap(page); 2.116 +} 2.117 + 2.118 +static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) 2.119 +{ 2.120 + char *vfrom, *vto; 2.121 + 2.122 + vfrom = kmap_atomic(from, KM_USER0); 2.123 + vto = kmap_atomic(to, KM_USER1); 2.124 + copy_user_page(vto, vfrom, vaddr); 2.125 + kunmap_atomic(vfrom, KM_USER0); 2.126 + kunmap_atomic(vto, KM_USER1); 2.127 +} 2.128 + 2.129 +static inline void copy_highpage(struct page *to, struct page *from) 2.130 +{ 2.131 + char *vfrom, *vto; 2.132 + 2.133 + vfrom = kmap_atomic(from, KM_USER0); 2.134 + vto = kmap_atomic(to, KM_USER1); 2.135 + copy_page(vto, vfrom); 2.136 + kunmap_atomic(vfrom, KM_USER0); 2.137 + kunmap_atomic(vto, KM_USER1); 2.138 +} 2.139 + 2.140 +#endif /* _LINUX_HIGHMEM_H */
--- a/linux-2.4.29-xen-sparse/mm/highmem.c  Thu Mar 10 14:05:30 2005 +0000
+++ b/linux-2.4.29-xen-sparse/mm/highmem.c  Thu Mar 10 15:39:32 2005 +0000
@@ -130,6 +130,13 @@ start:
         return vaddr;
 }
 
+void kmap_flush_unused(void)
+{
+        spin_lock(&kmap_lock);
+        flush_all_zero_pkmaps();
+        spin_unlock(&kmap_lock);
+}
+
 void fastcall *kmap_high(struct page *page, int nonblocking)
 {
         unsigned long vaddr;
--- a/linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c  Thu Mar 10 14:05:30 2005 +0000
+++ b/linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c  Thu Mar 10 15:39:32 2005 +0000
@@ -92,6 +92,7 @@ static struct timer_list balloon_timer;
 #define UNLIST_PAGE(p) ( list_del(&p->list) )
 #define pte_offset_kernel pte_offset
 #define subsys_initcall(_fn) __initcall(_fn)
+#define pfn_to_page(_pfn) (mem_map + (_pfn))
 #endif
 
 #define IPRINTK(fmt, args...) \
@@ -245,11 +246,10 @@ static void balloon_process(void *unused
 
             pfn = page - mem_map;
             mfn_list[i] = phys_to_machine_mapping[pfn];
-            phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
 
             if ( !PageHighMem(page) )
             {
-                v = phys_to_virt((page - mem_map) << PAGE_SHIFT);
+                v = phys_to_virt(pfn << PAGE_SHIFT);
                 scrub_pages(v, 1);
                 queue_l1_entry_update(get_ptep((unsigned long)v), 0);
             }
@@ -260,14 +260,23 @@ static void balloon_process(void *unused
                 scrub_pages(v, 1);
                 kunmap(page);
             }
-#endif
+#endif
+        }
 
-            balloon_append(page);
-        }
+        /* Ensure that ballooned highmem pages don't have cached mappings. */
+        kmap_flush_unused();
 
         /* Flush updates through and flush the TLB. */
         xen_tlb_flush();
 
+        /* No more mappings: invalidate pages in P2M and add to balloon. */
+        for ( i = 0; i < debt; i++ )
+        {
+            pfn = mfn_to_pfn(mfn_list[i]);
+            phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+            balloon_append(pfn_to_page(pfn));
+        }
+
         if ( HYPERVISOR_dom_mem_op(
             MEMOP_decrease_reservation, mfn_list, debt, 0) != debt )
             BUG();
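The rework of balloon_process() above changes the order of operations: pages are scrubbed and their kernel mappings dropped first (including stale kmap entries, via the new kmap_flush_unused()), the TLB is flushed, and only then are the P2M entries invalidated and the pages appended to the balloon and released to Xen. The sketch below is a plain user-space analogy of that "tear down every cached mapping before releasing the memory" ordering; it is not Xen code, and the names cache_get, cache_put and cache_flush_unused are invented for illustration.

```c
/* User-space analogy of the ordering enforced above: a small cache of
 * "mappings" that must be flushed before the underlying memory is freed,
 * mirroring kmap_flush_unused() + xen_tlb_flush() before balloon_append(). */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SLOTS 4

struct slot { void *obj; int users; };          /* users == 0: cached but idle */
static struct slot cache[CACHE_SLOTS];

static void *cache_get(void *obj)               /* like kmap: record a mapping */
{
    int free_slot = -1;
    for (int i = 0; i < CACHE_SLOTS; i++) {
        if (cache[i].obj == obj) { cache[i].users++; return obj; }
        if (cache[i].obj == NULL && free_slot < 0) free_slot = i;
    }
    if (free_slot < 0) return NULL;             /* cache full */
    cache[free_slot].obj = obj;
    cache[free_slot].users = 1;
    return obj;
}

static void cache_put(void *obj)                /* like kunmap: idle, still cached */
{
    for (int i = 0; i < CACHE_SLOTS; i++)
        if (cache[i].obj == obj) { cache[i].users--; return; }
}

static void cache_flush_unused(void)            /* like kmap_flush_unused() */
{
    for (int i = 0; i < CACHE_SLOTS; i++)
        if (cache[i].obj != NULL && cache[i].users == 0)
            cache[i].obj = NULL;                /* forget idle mappings */
}

int main(void)
{
    char *page = malloc(4096);
    char *v = cache_get(page);                  /* "scrub" through the mapping */
    if (v) v[0] = 0;
    cache_put(page);

    /* Freeing here would leave a dangling pointer in the cache; flush first. */
    cache_flush_unused();
    free(page);                                 /* only now is release safe */
    return 0;
}
```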
5.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 5.2 +++ b/linux-2.6.10-xen-sparse/include/linux/highmem.h Thu Mar 10 15:39:32 2005 +0000 5.3 @@ -0,0 +1,95 @@ 5.4 +#ifndef _LINUX_HIGHMEM_H 5.5 +#define _LINUX_HIGHMEM_H 5.6 + 5.7 +#include <linux/config.h> 5.8 +#include <linux/fs.h> 5.9 +#include <linux/mm.h> 5.10 + 5.11 +#include <asm/cacheflush.h> 5.12 + 5.13 +#ifdef CONFIG_HIGHMEM 5.14 + 5.15 +extern struct page *highmem_start_page; 5.16 + 5.17 +#include <asm/highmem.h> 5.18 + 5.19 +/* declarations for linux/mm/highmem.c */ 5.20 +unsigned int nr_free_highpages(void); 5.21 +void kmap_flush_unused(void); 5.22 + 5.23 +#else /* CONFIG_HIGHMEM */ 5.24 + 5.25 +static inline unsigned int nr_free_highpages(void) { return 0; } 5.26 +static inline void kmap_flush_unused(void) { } 5.27 + 5.28 +static inline void *kmap(struct page *page) 5.29 +{ 5.30 + might_sleep(); 5.31 + return page_address(page); 5.32 +} 5.33 + 5.34 +#define kunmap(page) do { (void) (page); } while (0) 5.35 + 5.36 +#define kmap_atomic(page, idx) page_address(page) 5.37 +#define kunmap_atomic(addr, idx) do { } while (0) 5.38 +#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 5.39 + 5.40 +#endif /* CONFIG_HIGHMEM */ 5.41 + 5.42 +/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 5.43 +static inline void clear_user_highpage(struct page *page, unsigned long vaddr) 5.44 +{ 5.45 + void *addr = kmap_atomic(page, KM_USER0); 5.46 + clear_user_page(addr, vaddr, page); 5.47 + kunmap_atomic(addr, KM_USER0); 5.48 + /* Make sure this page is cleared on other CPU's too before using it */ 5.49 + smp_wmb(); 5.50 +} 5.51 + 5.52 +static inline void clear_highpage(struct page *page) 5.53 +{ 5.54 + void *kaddr = kmap_atomic(page, KM_USER0); 5.55 + clear_page(kaddr); 5.56 + kunmap_atomic(kaddr, KM_USER0); 5.57 +} 5.58 + 5.59 +/* 5.60 + * Same but also flushes aliased cache contents to RAM. 5.61 + */ 5.62 +static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) 5.63 +{ 5.64 + void *kaddr; 5.65 + 5.66 + BUG_ON(offset + size > PAGE_SIZE); 5.67 + 5.68 + kaddr = kmap_atomic(page, KM_USER0); 5.69 + memset((char *)kaddr + offset, 0, size); 5.70 + flush_dcache_page(page); 5.71 + kunmap_atomic(kaddr, KM_USER0); 5.72 +} 5.73 + 5.74 +static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) 5.75 +{ 5.76 + char *vfrom, *vto; 5.77 + 5.78 + vfrom = kmap_atomic(from, KM_USER0); 5.79 + vto = kmap_atomic(to, KM_USER1); 5.80 + copy_user_page(vto, vfrom, vaddr, to); 5.81 + kunmap_atomic(vfrom, KM_USER0); 5.82 + kunmap_atomic(vto, KM_USER1); 5.83 + /* Make sure this page is cleared on other CPU's too before using it */ 5.84 + smp_wmb(); 5.85 +} 5.86 + 5.87 +static inline void copy_highpage(struct page *to, struct page *from) 5.88 +{ 5.89 + char *vfrom, *vto; 5.90 + 5.91 + vfrom = kmap_atomic(from, KM_USER0); 5.92 + vto = kmap_atomic(to, KM_USER1); 5.93 + copy_page(vto, vfrom); 5.94 + kunmap_atomic(vfrom, KM_USER0); 5.95 + kunmap_atomic(vto, KM_USER1); 5.96 +} 5.97 + 5.98 +#endif /* _LINUX_HIGHMEM_H */
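A detail worth noting in the new 2.6 highmem.h above is the !CONFIG_HIGHMEM branch: kmap/kunmap/kmap_atomic collapse to page_address() and do-nothing macros such as do { (void) (page); } while (0), so call sites build identically whether or not highmem support is configured, and the (void) cast suppresses unused-variable warnings. The fragment below is a generic, stand-alone illustration of that fallback-macro pattern, not kernel code; the trace_begin/trace_end names are invented for the example.

```c
/* Stand-alone illustration of the "compile the feature out behind no-op
 * macros" pattern used by the !CONFIG_HIGHMEM branch above.
 * Build normally for the no-op variant, or with -DUSE_TRACING for the real one. */
#include <stdio.h>

#ifdef USE_TRACING
static void trace_begin(const char *what) { fprintf(stderr, "begin %s\n", what); }
static void trace_end(const char *what)   { fprintf(stderr, "end   %s\n", what); }
#else
/* Feature compiled out: same names, zero cost.  The (void) cast swallows the
 * argument so callers don't trigger unused-variable warnings, and the
 * do { } while (0) keeps the macro safe inside if/else without braces. */
#define trace_begin(what) do { (void)(what); } while (0)
#define trace_end(what)   do { (void)(what); } while (0)
#endif

int main(void)
{
    trace_begin("copy");
    puts("doing the work");      /* caller code is identical in both configs */
    trace_end("copy");
    return 0;
}
```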
6.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 6.2 +++ b/linux-2.6.10-xen-sparse/mm/highmem.c Thu Mar 10 15:39:32 2005 +0000 6.3 @@ -0,0 +1,614 @@ 6.4 +/* 6.5 + * High memory handling common code and variables. 6.6 + * 6.7 + * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de 6.8 + * Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de 6.9 + * 6.10 + * 6.11 + * Redesigned the x86 32-bit VM architecture to deal with 6.12 + * 64-bit physical space. With current x86 CPUs this 6.13 + * means up to 64 Gigabytes physical RAM. 6.14 + * 6.15 + * Rewrote high memory support to move the page cache into 6.16 + * high memory. Implemented permanent (schedulable) kmaps 6.17 + * based on Linus' idea. 6.18 + * 6.19 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 6.20 + */ 6.21 + 6.22 +#include <linux/mm.h> 6.23 +#include <linux/module.h> 6.24 +#include <linux/swap.h> 6.25 +#include <linux/bio.h> 6.26 +#include <linux/pagemap.h> 6.27 +#include <linux/mempool.h> 6.28 +#include <linux/blkdev.h> 6.29 +#include <linux/init.h> 6.30 +#include <linux/hash.h> 6.31 +#include <linux/highmem.h> 6.32 +#include <asm/tlbflush.h> 6.33 + 6.34 +static mempool_t *page_pool, *isa_page_pool; 6.35 + 6.36 +static void *page_pool_alloc(int gfp_mask, void *data) 6.37 +{ 6.38 + int gfp = gfp_mask | (int) (long) data; 6.39 + 6.40 + return alloc_page(gfp); 6.41 +} 6.42 + 6.43 +static void page_pool_free(void *page, void *data) 6.44 +{ 6.45 + __free_page(page); 6.46 +} 6.47 + 6.48 +/* 6.49 + * Virtual_count is not a pure "count". 6.50 + * 0 means that it is not mapped, and has not been mapped 6.51 + * since a TLB flush - it is usable. 6.52 + * 1 means that there are no users, but it has been mapped 6.53 + * since the last TLB flush - so we can't use it. 6.54 + * n means that there are (n-1) current users of it. 6.55 + */ 6.56 +#ifdef CONFIG_HIGHMEM 6.57 +static int pkmap_count[LAST_PKMAP]; 6.58 +static unsigned int last_pkmap_nr; 6.59 +static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; 6.60 + 6.61 +pte_t * pkmap_page_table; 6.62 + 6.63 +static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); 6.64 + 6.65 +static void flush_all_zero_pkmaps(void) 6.66 +{ 6.67 + int i; 6.68 + 6.69 + flush_cache_kmaps(); 6.70 + 6.71 + for (i = 0; i < LAST_PKMAP; i++) { 6.72 + struct page *page; 6.73 + 6.74 + /* 6.75 + * zero means we don't have anything to do, 6.76 + * >1 means that it is still in use. Only 6.77 + * a count of 1 means that it is free but 6.78 + * needs to be unmapped 6.79 + */ 6.80 + if (pkmap_count[i] != 1) 6.81 + continue; 6.82 + pkmap_count[i] = 0; 6.83 + 6.84 + /* sanity check */ 6.85 + if (pte_none(pkmap_page_table[i])) 6.86 + BUG(); 6.87 + 6.88 + /* 6.89 + * Don't need an atomic fetch-and-clear op here; 6.90 + * no-one has the page mapped, and cannot get at 6.91 + * its virtual address (and hence PTE) without first 6.92 + * getting the kmap_lock (which is held here). 6.93 + * So no dangers, even with speculative execution. 
6.94 + */ 6.95 + page = pte_page(pkmap_page_table[i]); 6.96 + pte_clear(&pkmap_page_table[i]); 6.97 + 6.98 + set_page_address(page, NULL); 6.99 + } 6.100 + flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); 6.101 +} 6.102 + 6.103 +static inline unsigned long map_new_virtual(struct page *page) 6.104 +{ 6.105 + unsigned long vaddr; 6.106 + int count; 6.107 + 6.108 +start: 6.109 + count = LAST_PKMAP; 6.110 + /* Find an empty entry */ 6.111 + for (;;) { 6.112 + last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK; 6.113 + if (!last_pkmap_nr) { 6.114 + flush_all_zero_pkmaps(); 6.115 + count = LAST_PKMAP; 6.116 + } 6.117 + if (!pkmap_count[last_pkmap_nr]) 6.118 + break; /* Found a usable entry */ 6.119 + if (--count) 6.120 + continue; 6.121 + 6.122 + /* 6.123 + * Sleep for somebody else to unmap their entries 6.124 + */ 6.125 + { 6.126 + DECLARE_WAITQUEUE(wait, current); 6.127 + 6.128 + __set_current_state(TASK_UNINTERRUPTIBLE); 6.129 + add_wait_queue(&pkmap_map_wait, &wait); 6.130 + spin_unlock(&kmap_lock); 6.131 + schedule(); 6.132 + remove_wait_queue(&pkmap_map_wait, &wait); 6.133 + spin_lock(&kmap_lock); 6.134 + 6.135 + /* Somebody else might have mapped it while we slept */ 6.136 + if (page_address(page)) 6.137 + return (unsigned long)page_address(page); 6.138 + 6.139 + /* Re-start */ 6.140 + goto start; 6.141 + } 6.142 + } 6.143 + vaddr = PKMAP_ADDR(last_pkmap_nr); 6.144 + set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); 6.145 + 6.146 + pkmap_count[last_pkmap_nr] = 1; 6.147 + set_page_address(page, (void *)vaddr); 6.148 + 6.149 + return vaddr; 6.150 +} 6.151 + 6.152 +void kmap_flush_unused(void) 6.153 +{ 6.154 + spin_lock(&kmap_lock); 6.155 + flush_all_zero_pkmaps(); 6.156 + spin_unlock(&kmap_lock); 6.157 +} 6.158 + 6.159 +EXPORT_SYMBOL(kmap_flush_unused); 6.160 + 6.161 +void fastcall *kmap_high(struct page *page) 6.162 +{ 6.163 + unsigned long vaddr; 6.164 + 6.165 + /* 6.166 + * For highmem pages, we can't trust "virtual" until 6.167 + * after we have the lock. 6.168 + * 6.169 + * We cannot call this from interrupts, as it may block 6.170 + */ 6.171 + spin_lock(&kmap_lock); 6.172 + vaddr = (unsigned long)page_address(page); 6.173 + if (!vaddr) 6.174 + vaddr = map_new_virtual(page); 6.175 + pkmap_count[PKMAP_NR(vaddr)]++; 6.176 + if (pkmap_count[PKMAP_NR(vaddr)] < 2) 6.177 + BUG(); 6.178 + spin_unlock(&kmap_lock); 6.179 + return (void*) vaddr; 6.180 +} 6.181 + 6.182 +EXPORT_SYMBOL(kmap_high); 6.183 + 6.184 +void fastcall kunmap_high(struct page *page) 6.185 +{ 6.186 + unsigned long vaddr; 6.187 + unsigned long nr; 6.188 + int need_wakeup; 6.189 + 6.190 + spin_lock(&kmap_lock); 6.191 + vaddr = (unsigned long)page_address(page); 6.192 + if (!vaddr) 6.193 + BUG(); 6.194 + nr = PKMAP_NR(vaddr); 6.195 + 6.196 + /* 6.197 + * A count must never go down to zero 6.198 + * without a TLB flush! 6.199 + */ 6.200 + need_wakeup = 0; 6.201 + switch (--pkmap_count[nr]) { 6.202 + case 0: 6.203 + BUG(); 6.204 + case 1: 6.205 + /* 6.206 + * Avoid an unnecessary wake_up() function call. 6.207 + * The common case is pkmap_count[] == 1, but 6.208 + * no waiters. 6.209 + * The tasks queued in the wait-queue are guarded 6.210 + * by both the lock in the wait-queue-head and by 6.211 + * the kmap_lock. As the kmap_lock is held here, 6.212 + * no need for the wait-queue-head's lock. Simply 6.213 + * test if the queue is empty. 
6.214 + */ 6.215 + need_wakeup = waitqueue_active(&pkmap_map_wait); 6.216 + } 6.217 + spin_unlock(&kmap_lock); 6.218 + 6.219 + /* do wake-up, if needed, race-free outside of the spin lock */ 6.220 + if (need_wakeup) 6.221 + wake_up(&pkmap_map_wait); 6.222 +} 6.223 + 6.224 +EXPORT_SYMBOL(kunmap_high); 6.225 + 6.226 +#define POOL_SIZE 64 6.227 + 6.228 +static __init int init_emergency_pool(void) 6.229 +{ 6.230 + struct sysinfo i; 6.231 + si_meminfo(&i); 6.232 + si_swapinfo(&i); 6.233 + 6.234 + if (!i.totalhigh) 6.235 + return 0; 6.236 + 6.237 + page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); 6.238 + if (!page_pool) 6.239 + BUG(); 6.240 + printk("highmem bounce pool size: %d pages\n", POOL_SIZE); 6.241 + 6.242 + return 0; 6.243 +} 6.244 + 6.245 +__initcall(init_emergency_pool); 6.246 + 6.247 +/* 6.248 + * highmem version, map in to vec 6.249 + */ 6.250 +static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) 6.251 +{ 6.252 + unsigned long flags; 6.253 + unsigned char *vto; 6.254 + 6.255 + local_irq_save(flags); 6.256 + vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ); 6.257 + memcpy(vto + to->bv_offset, vfrom, to->bv_len); 6.258 + kunmap_atomic(vto, KM_BOUNCE_READ); 6.259 + local_irq_restore(flags); 6.260 +} 6.261 + 6.262 +#else /* CONFIG_HIGHMEM */ 6.263 + 6.264 +#define bounce_copy_vec(to, vfrom) \ 6.265 + memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) 6.266 + 6.267 +#endif 6.268 + 6.269 +#define ISA_POOL_SIZE 16 6.270 + 6.271 +/* 6.272 + * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA 6.273 + * as the max address, so check if the pool has already been created. 6.274 + */ 6.275 +int init_emergency_isa_pool(void) 6.276 +{ 6.277 + if (isa_page_pool) 6.278 + return 0; 6.279 + 6.280 + isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA); 6.281 + if (!isa_page_pool) 6.282 + BUG(); 6.283 + 6.284 + printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); 6.285 + return 0; 6.286 +} 6.287 + 6.288 +/* 6.289 + * Simple bounce buffer support for highmem pages. Depending on the 6.290 + * queue gfp mask set, *to may or may not be a highmem page. 
kmap it 6.291 + * always, it will do the Right Thing 6.292 + */ 6.293 +static void copy_to_high_bio_irq(struct bio *to, struct bio *from) 6.294 +{ 6.295 + unsigned char *vfrom; 6.296 + struct bio_vec *tovec, *fromvec; 6.297 + int i; 6.298 + 6.299 + __bio_for_each_segment(tovec, to, i, 0) { 6.300 + fromvec = from->bi_io_vec + i; 6.301 + 6.302 + /* 6.303 + * not bounced 6.304 + */ 6.305 + if (tovec->bv_page == fromvec->bv_page) 6.306 + continue; 6.307 + 6.308 + /* 6.309 + * fromvec->bv_offset and fromvec->bv_len might have been 6.310 + * modified by the block layer, so use the original copy, 6.311 + * bounce_copy_vec already uses tovec->bv_len 6.312 + */ 6.313 + vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; 6.314 + 6.315 + flush_dcache_page(tovec->bv_page); 6.316 + bounce_copy_vec(tovec, vfrom); 6.317 + } 6.318 +} 6.319 + 6.320 +static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) 6.321 +{ 6.322 + struct bio *bio_orig = bio->bi_private; 6.323 + struct bio_vec *bvec, *org_vec; 6.324 + int i; 6.325 + 6.326 + if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) 6.327 + set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags); 6.328 + 6.329 + /* 6.330 + * free up bounce indirect pages used 6.331 + */ 6.332 + __bio_for_each_segment(bvec, bio, i, 0) { 6.333 + org_vec = bio_orig->bi_io_vec + i; 6.334 + if (bvec->bv_page == org_vec->bv_page) 6.335 + continue; 6.336 + 6.337 + mempool_free(bvec->bv_page, pool); 6.338 + } 6.339 + 6.340 + bio_endio(bio_orig, bio_orig->bi_size, err); 6.341 + bio_put(bio); 6.342 +} 6.343 + 6.344 +static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) 6.345 +{ 6.346 + if (bio->bi_size) 6.347 + return 1; 6.348 + 6.349 + bounce_end_io(bio, page_pool, err); 6.350 + return 0; 6.351 +} 6.352 + 6.353 +static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err) 6.354 +{ 6.355 + if (bio->bi_size) 6.356 + return 1; 6.357 + 6.358 + bounce_end_io(bio, isa_page_pool, err); 6.359 + return 0; 6.360 +} 6.361 + 6.362 +static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) 6.363 +{ 6.364 + struct bio *bio_orig = bio->bi_private; 6.365 + 6.366 + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 6.367 + copy_to_high_bio_irq(bio_orig, bio); 6.368 + 6.369 + bounce_end_io(bio, pool, err); 6.370 +} 6.371 + 6.372 +static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err) 6.373 +{ 6.374 + if (bio->bi_size) 6.375 + return 1; 6.376 + 6.377 + __bounce_end_io_read(bio, page_pool, err); 6.378 + return 0; 6.379 +} 6.380 + 6.381 +static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err) 6.382 +{ 6.383 + if (bio->bi_size) 6.384 + return 1; 6.385 + 6.386 + __bounce_end_io_read(bio, isa_page_pool, err); 6.387 + return 0; 6.388 +} 6.389 + 6.390 +static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, 6.391 + mempool_t *pool) 6.392 +{ 6.393 + struct page *page; 6.394 + struct bio *bio = NULL; 6.395 + int i, rw = bio_data_dir(*bio_orig); 6.396 + struct bio_vec *to, *from; 6.397 + 6.398 + bio_for_each_segment(from, *bio_orig, i) { 6.399 + page = from->bv_page; 6.400 + 6.401 + /* 6.402 + * is destination page below bounce pfn? 
6.403 + */ 6.404 + if (page_to_pfn(page) < q->bounce_pfn) 6.405 + continue; 6.406 + 6.407 + /* 6.408 + * irk, bounce it 6.409 + */ 6.410 + if (!bio) 6.411 + bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt); 6.412 + 6.413 + to = bio->bi_io_vec + i; 6.414 + 6.415 + to->bv_page = mempool_alloc(pool, q->bounce_gfp); 6.416 + to->bv_len = from->bv_len; 6.417 + to->bv_offset = from->bv_offset; 6.418 + 6.419 + if (rw == WRITE) { 6.420 + char *vto, *vfrom; 6.421 + 6.422 + flush_dcache_page(from->bv_page); 6.423 + vto = page_address(to->bv_page) + to->bv_offset; 6.424 + vfrom = kmap(from->bv_page) + from->bv_offset; 6.425 + memcpy(vto, vfrom, to->bv_len); 6.426 + kunmap(from->bv_page); 6.427 + } 6.428 + } 6.429 + 6.430 + /* 6.431 + * no pages bounced 6.432 + */ 6.433 + if (!bio) 6.434 + return; 6.435 + 6.436 + /* 6.437 + * at least one page was bounced, fill in possible non-highmem 6.438 + * pages 6.439 + */ 6.440 + bio_for_each_segment(from, *bio_orig, i) { 6.441 + to = bio_iovec_idx(bio, i); 6.442 + if (!to->bv_page) { 6.443 + to->bv_page = from->bv_page; 6.444 + to->bv_len = from->bv_len; 6.445 + to->bv_offset = from->bv_offset; 6.446 + } 6.447 + } 6.448 + 6.449 + bio->bi_bdev = (*bio_orig)->bi_bdev; 6.450 + bio->bi_flags |= (1 << BIO_BOUNCED); 6.451 + bio->bi_sector = (*bio_orig)->bi_sector; 6.452 + bio->bi_rw = (*bio_orig)->bi_rw; 6.453 + 6.454 + bio->bi_vcnt = (*bio_orig)->bi_vcnt; 6.455 + bio->bi_idx = (*bio_orig)->bi_idx; 6.456 + bio->bi_size = (*bio_orig)->bi_size; 6.457 + 6.458 + if (pool == page_pool) { 6.459 + bio->bi_end_io = bounce_end_io_write; 6.460 + if (rw == READ) 6.461 + bio->bi_end_io = bounce_end_io_read; 6.462 + } else { 6.463 + bio->bi_end_io = bounce_end_io_write_isa; 6.464 + if (rw == READ) 6.465 + bio->bi_end_io = bounce_end_io_read_isa; 6.466 + } 6.467 + 6.468 + bio->bi_private = *bio_orig; 6.469 + *bio_orig = bio; 6.470 +} 6.471 + 6.472 +void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) 6.473 +{ 6.474 + mempool_t *pool; 6.475 + 6.476 + /* 6.477 + * for non-isa bounce case, just check if the bounce pfn is equal 6.478 + * to or bigger than the highest pfn in the system -- in that case, 6.479 + * don't waste time iterating over bio segments 6.480 + */ 6.481 + if (!(q->bounce_gfp & GFP_DMA)) { 6.482 + if (q->bounce_pfn >= blk_max_pfn) 6.483 + return; 6.484 + pool = page_pool; 6.485 + } else { 6.486 + BUG_ON(!isa_page_pool); 6.487 + pool = isa_page_pool; 6.488 + } 6.489 + 6.490 + /* 6.491 + * slow path 6.492 + */ 6.493 + __blk_queue_bounce(q, bio_orig, pool); 6.494 +} 6.495 + 6.496 +EXPORT_SYMBOL(blk_queue_bounce); 6.497 + 6.498 +#if defined(HASHED_PAGE_VIRTUAL) 6.499 + 6.500 +#define PA_HASH_ORDER 7 6.501 + 6.502 +/* 6.503 + * Describes one page->virtual association 6.504 + */ 6.505 +struct page_address_map { 6.506 + struct page *page; 6.507 + void *virtual; 6.508 + struct list_head list; 6.509 +}; 6.510 + 6.511 +/* 6.512 + * page_address_map freelist, allocated from page_address_maps. 
6.513 + */ 6.514 +static struct list_head page_address_pool; /* freelist */ 6.515 +static spinlock_t pool_lock; /* protects page_address_pool */ 6.516 + 6.517 +/* 6.518 + * Hash table bucket 6.519 + */ 6.520 +static struct page_address_slot { 6.521 + struct list_head lh; /* List of page_address_maps */ 6.522 + spinlock_t lock; /* Protect this bucket's list */ 6.523 +} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; 6.524 + 6.525 +static struct page_address_slot *page_slot(struct page *page) 6.526 +{ 6.527 + return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; 6.528 +} 6.529 + 6.530 +void *page_address(struct page *page) 6.531 +{ 6.532 + unsigned long flags; 6.533 + void *ret; 6.534 + struct page_address_slot *pas; 6.535 + 6.536 + if (!PageHighMem(page)) 6.537 + return lowmem_page_address(page); 6.538 + 6.539 + pas = page_slot(page); 6.540 + ret = NULL; 6.541 + spin_lock_irqsave(&pas->lock, flags); 6.542 + if (!list_empty(&pas->lh)) { 6.543 + struct page_address_map *pam; 6.544 + 6.545 + list_for_each_entry(pam, &pas->lh, list) { 6.546 + if (pam->page == page) { 6.547 + ret = pam->virtual; 6.548 + goto done; 6.549 + } 6.550 + } 6.551 + } 6.552 +done: 6.553 + spin_unlock_irqrestore(&pas->lock, flags); 6.554 + return ret; 6.555 +} 6.556 + 6.557 +EXPORT_SYMBOL(page_address); 6.558 + 6.559 +void set_page_address(struct page *page, void *virtual) 6.560 +{ 6.561 + unsigned long flags; 6.562 + struct page_address_slot *pas; 6.563 + struct page_address_map *pam; 6.564 + 6.565 + BUG_ON(!PageHighMem(page)); 6.566 + 6.567 + pas = page_slot(page); 6.568 + if (virtual) { /* Add */ 6.569 + BUG_ON(list_empty(&page_address_pool)); 6.570 + 6.571 + spin_lock_irqsave(&pool_lock, flags); 6.572 + pam = list_entry(page_address_pool.next, 6.573 + struct page_address_map, list); 6.574 + list_del(&pam->list); 6.575 + spin_unlock_irqrestore(&pool_lock, flags); 6.576 + 6.577 + pam->page = page; 6.578 + pam->virtual = virtual; 6.579 + 6.580 + spin_lock_irqsave(&pas->lock, flags); 6.581 + list_add_tail(&pam->list, &pas->lh); 6.582 + spin_unlock_irqrestore(&pas->lock, flags); 6.583 + } else { /* Remove */ 6.584 + spin_lock_irqsave(&pas->lock, flags); 6.585 + list_for_each_entry(pam, &pas->lh, list) { 6.586 + if (pam->page == page) { 6.587 + list_del(&pam->list); 6.588 + spin_unlock_irqrestore(&pas->lock, flags); 6.589 + spin_lock_irqsave(&pool_lock, flags); 6.590 + list_add_tail(&pam->list, &page_address_pool); 6.591 + spin_unlock_irqrestore(&pool_lock, flags); 6.592 + goto done; 6.593 + } 6.594 + } 6.595 + spin_unlock_irqrestore(&pas->lock, flags); 6.596 + } 6.597 +done: 6.598 + return; 6.599 +} 6.600 + 6.601 +static struct page_address_map page_address_maps[LAST_PKMAP]; 6.602 + 6.603 +void __init page_address_init(void) 6.604 +{ 6.605 + int i; 6.606 + 6.607 + INIT_LIST_HEAD(&page_address_pool); 6.608 + for (i = 0; i < ARRAY_SIZE(page_address_maps); i++) 6.609 + list_add(&page_address_maps[i].list, &page_address_pool); 6.610 + for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) { 6.611 + INIT_LIST_HEAD(&page_address_htable[i].lh); 6.612 + spin_lock_init(&page_address_htable[i].lock); 6.613 + } 6.614 + spin_lock_init(&pool_lock); 6.615 +} 6.616 + 6.617 +#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
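The comment near the top of the new mm/highmem.c above documents the pkmap_count convention that kmap_flush_unused() depends on: 0 means a slot is unused and has been flushed from the TLB, 1 means it has no users but has been mapped since the last flush (so it cannot be reused yet), and n means there are n-1 active users. The following stand-alone model makes those state transitions concrete; it is a simplification with invented names (slot_map, slot_unmap, flush_all_zero) and omits the real locking, page tables and wait queue.

```c
/* Stand-alone model of the pkmap_count convention described above:
 * 0 = free and flushed, 1 = free but possibly still in the TLB, n = n-1 users. */
#include <assert.h>
#include <stdio.h>

#define LAST_PKMAP 8
static int pkmap_count[LAST_PKMAP];

static void flush_all_zero(void)          /* what flush_all_zero_pkmaps() models */
{
    for (int i = 0; i < LAST_PKMAP; i++)
        if (pkmap_count[i] == 1)          /* unused but stale: reclaim it */
            pkmap_count[i] = 0;           /* ...after a (simulated) TLB flush */
}

static int slot_map(void)                 /* like kmap_high: grab a usable slot */
{
    for (int i = 0; i < LAST_PKMAP; i++)
        if (pkmap_count[i] == 0) {        /* only fully-flushed slots are usable */
            pkmap_count[i] = 2;           /* 1 for "mapped" + 1 user */
            return i;
        }
    return -1;                            /* all busy or stale: real code sleeps/flushes */
}

static void slot_unmap(int i)             /* like kunmap_high */
{
    assert(pkmap_count[i] > 1);           /* a count must never hit 0 without a flush */
    pkmap_count[i]--;
}

int main(void)
{
    int s = slot_map();
    printf("mapped slot %d, count=%d\n", s, pkmap_count[s]);
    slot_unmap(s);
    printf("unmapped,      count=%d (not yet reusable)\n", pkmap_count[s]);
    flush_all_zero();                     /* the step kmap_flush_unused() forces */
    printf("after flush,   count=%d (reusable again)\n", pkmap_count[s]);
    return 0;
}
```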
--- a/tools/libxc/xc_linux_restore.c  Thu Mar 10 14:05:30 2005 +0000
+++ b/tools/libxc/xc_linux_restore.c  Thu Mar 10 15:39:32 2005 +0000
@@ -99,7 +99,7 @@ int xc_linux_restore(int xc_handle, XcIO
     /* A temporary mapping of the guest's suspend record. */
     suspend_record_t *p_srec;
 
-    char *region_base;
+    char *region_base, *p_gdt;
 
     mmu_t *mmu = NULL;
 
@@ -542,6 +542,13 @@ int xc_linux_restore(int xc_handle, XcIO
         ctxt.gdt_frames[i] = pfn_to_mfn_table[pfn];
     }
 
+    /* Zero hypervisor GDT entries (supresses ugly warning) */
+    p_gdt = xc_map_foreign_range(
+        xc_handle, dom, PAGE_SIZE, PROT_WRITE, ctxt.gdt_frames[0]);
+    memset( p_gdt + FIRST_RESERVED_GDT_ENTRY*8, 0,
+            NR_RESERVED_GDT_ENTRIES*8 );
+    munmap( p_gdt, PAGE_SIZE );
+
     /* Uncanonicalise the page table base pointer. */
     pfn = ctxt.pt_base >> PAGE_SHIFT;
     if ( (pfn >= nr_pfns) || ((pfn_type[pfn]&LTABTYPE_MASK) != L2TAB) )
--- a/tools/libxutil/file_stream.c  Thu Mar 10 14:05:30 2005 +0000
+++ b/tools/libxutil/file_stream.c  Thu Mar 10 15:39:32 2005 +0000
@@ -69,18 +69,28 @@ IOStream *iostdout = &_iostdout;
 /** IOStream for stderr. */
 IOStream *iostderr = &_iostderr;
 
-/** Get the underlying FILE*.
- *
+/* Get the underlying FILE*.
+ *
  * @param s file stream
  * @return the stream s wraps
  */
 static inline FILE *get_file(IOStream *s){
-    switch((long)s->data){
-    case 1: s->data = stdin;  break;
-    case 2: s->data = stdout; break;
-    case 3: s->data = stderr; break;
-    }
-    return (FILE*)s->data;
+    FILE *data = NULL;
+    switch((long)s->data){
+    case 1:
+        data = stdin;
+        break;
+    case 2:
+        data = stdout;
+        break;
+    case 3:
+        data = stderr;
+        break;
+    default:
+        data = (FILE*)s->data;
+        break;
+    }
+    return data;
 }
 
 /** Control buffering on the underlying stream, like setvbuf().
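The get_file() change above stops overwriting s->data: the old code replaced the 1/2/3 sentinel with the resolved FILE* on first use, while the new version leaves the stream object untouched and resolves the sentinel on every call, falling through to a stored FILE* in the default case. The sentinels most likely exist because stdin/stdout/stderr are not constant expressions in C, so the static IOStream objects cannot name them directly in their initializers. Below is a minimal stand-alone sketch of the same trick, with invented struct and function names.

```c
/* Stand-alone sketch of the sentinel trick used by file_stream.c above:
 * static objects store small integer sentinels for the standard streams and
 * resolve them at call time, since stdin/stdout/stderr cannot appear in a
 * static initializer. */
#include <stdio.h>

typedef struct Stream { void *data; } Stream;

static Stream s_out = { .data = (void *)2 };   /* 2 == "stdout" sentinel */
static Stream s_err = { .data = (void *)3 };   /* 3 == "stderr" sentinel */

static FILE *stream_file(const Stream *s)
{
    switch ((long)s->data) {
    case 1:  return stdin;
    case 2:  return stdout;
    case 3:  return stderr;
    default: return (FILE *)s->data;           /* a real FILE* was stored */
    }
}

int main(void)
{
    fprintf(stream_file(&s_out), "to stdout\n");
    fprintf(stream_file(&s_err), "to stderr\n");
    return 0;
}
```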
--- a/tools/xfrd/xfrd.c  Thu Mar 10 14:05:30 2005 +0000
+++ b/tools/xfrd/xfrd.c  Thu Mar 10 15:39:32 2005 +0000
@@ -1007,6 +1007,8 @@ int xfrd_accept(Args *args, int sock){
     }
     iprintf("> Accepted connection from %s:%d on %d\n",
             inet_ntoa(peer_in.sin_addr), htons(peer_in.sin_port), sock);
+    fflush(stdout);
+    fflush(stderr);
     pid = fork();
     if(pid > 0){
         // Parent, fork succeeded.
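The fflush() calls added before fork() keep buffered stdio output from being emitted twice: after fork() both processes hold a copy of any unflushed buffer, and each copy gets written out when that process later flushes or exits. A minimal stand-alone demonstration (not xfrd code) follows; build it with and without -DFLUSH_BEFORE_FORK and compare the output.

```c
/* Demonstrates the duplicate-output problem the fflush() calls above avoid.
 * Without FLUSH_BEFORE_FORK, "accepted connection" is printed twice. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    setvbuf(stdout, NULL, _IOFBF, BUFSIZ);  /* force full buffering, as when
                                               stdout is redirected to a file */
    printf("accepted connection\n");        /* still sitting in the stdio buffer */
#ifdef FLUSH_BEFORE_FORK
    fflush(stdout);                         /* the fix applied in xfrd_accept() */
#endif

    pid_t pid = fork();
    if (pid == 0) {
        printf("child: handling request\n");
        exit(0);                            /* exit() flushes the child's copy
                                               of the inherited buffer */
    }
    waitpid(pid, NULL, 0);
    return 0;                               /* parent's exit flushes its copy */
}
```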