ia64/xen-unstable
changeset 6380:522bc50588ed
merge?
--- a/.hgignore	Tue Aug 23 18:25:51 2005 +0000
+++ b/.hgignore	Tue Aug 23 18:27:22 2005 +0000
@@ -147,6 +147,7 @@
 ^tools/xcs/xcsdump$
 ^tools/xcutils/xc_restore$
 ^tools/xcutils/xc_save$
+^tools/xenstat/xentop/xentop$
 ^tools/xenstore/testsuite/tmp/.*$
 ^tools/xenstore/xen$
 ^tools/xenstore/xenstored$
11.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Tue Aug 23 18:25:51 2005 +0000 11.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Tue Aug 23 18:27:22 2005 +0000 11.3 @@ -807,7 +807,107 @@ CONFIG_DUMMY_CONSOLE=y 11.4 # 11.5 CONFIG_USB_ARCH_HAS_HCD=y 11.6 CONFIG_USB_ARCH_HAS_OHCI=y 11.7 -# CONFIG_USB is not set 11.8 +CONFIG_USB=y 11.9 +# CONFIG_USB_DEBUG is not set 11.10 + 11.11 +# 11.12 +# Miscellaneous USB options 11.13 +# 11.14 +# CONFIG_USB_DEVICEFS is not set 11.15 +# CONFIG_USB_BANDWIDTH is not set 11.16 +# CONFIG_USB_DYNAMIC_MINORS is not set 11.17 +# CONFIG_USB_OTG is not set 11.18 + 11.19 +# 11.20 +# USB Host Controller Drivers 11.21 +# 11.22 +# CONFIG_USB_EHCI_HCD is not set 11.23 +CONFIG_USB_OHCI_HCD=y 11.24 +# CONFIG_USB_OHCI_BIG_ENDIAN is not set 11.25 +CONFIG_USB_OHCI_LITTLE_ENDIAN=y 11.26 +CONFIG_USB_UHCI_HCD=y 11.27 +# CONFIG_USB_SL811_HCD is not set 11.28 + 11.29 +# 11.30 +# USB Device Class drivers 11.31 +# 11.32 +# CONFIG_USB_BLUETOOTH_TTY is not set 11.33 +# CONFIG_USB_ACM is not set 11.34 +# CONFIG_USB_PRINTER is not set 11.35 + 11.36 +# 11.37 +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information 11.38 +# 11.39 +# CONFIG_USB_STORAGE is not set 11.40 + 11.41 +# 11.42 +# USB Input Devices 11.43 +# 11.44 +CONFIG_USB_HID=y 11.45 +CONFIG_USB_HIDINPUT=y 11.46 +# CONFIG_HID_FF is not set 11.47 +# CONFIG_USB_HIDDEV is not set 11.48 +# CONFIG_USB_AIPTEK is not set 11.49 +# CONFIG_USB_WACOM is not set 11.50 +# CONFIG_USB_KBTAB is not set 11.51 +# CONFIG_USB_POWERMATE is not set 11.52 +# CONFIG_USB_MTOUCH is not set 11.53 +# CONFIG_USB_EGALAX is not set 11.54 +# CONFIG_USB_XPAD is not set 11.55 +# CONFIG_USB_ATI_REMOTE is not set 11.56 + 11.57 +# 11.58 +# USB Imaging devices 11.59 +# 11.60 +# CONFIG_USB_MDC800 is not set 11.61 +# CONFIG_USB_MICROTEK is not set 11.62 + 11.63 +# 11.64 +# USB Multimedia devices 11.65 +# 11.66 +# CONFIG_USB_DABUSB is not set 11.67 + 11.68 +# 11.69 +# Video4Linux support is needed for USB Multimedia device support 11.70 +# 11.71 + 11.72 +# 11.73 +# USB Network Adapters 11.74 +# 11.75 +# CONFIG_USB_CATC is not set 11.76 +# CONFIG_USB_KAWETH is not set 11.77 +# CONFIG_USB_PEGASUS is not set 11.78 +# CONFIG_USB_RTL8150 is not set 11.79 +# CONFIG_USB_USBNET is not set 11.80 +CONFIG_USB_MON=y 11.81 + 11.82 +# 11.83 +# USB port drivers 11.84 +# 11.85 + 11.86 +# 11.87 +# USB Serial Converter support 11.88 +# 11.89 +# CONFIG_USB_SERIAL is not set 11.90 + 11.91 +# 11.92 +# USB Miscellaneous drivers 11.93 +# 11.94 +# CONFIG_USB_EMI62 is not set 11.95 +# CONFIG_USB_EMI26 is not set 11.96 +# CONFIG_USB_AUERSWALD is not set 11.97 +# CONFIG_USB_RIO500 is not set 11.98 +# CONFIG_USB_LEGOTOWER is not set 11.99 +# CONFIG_USB_LCD is not set 11.100 +# CONFIG_USB_LED is not set 11.101 +# CONFIG_USB_CYTHERM is not set 11.102 +# CONFIG_USB_PHIDGETKIT is not set 11.103 +# CONFIG_USB_PHIDGETSERVO is not set 11.104 +# CONFIG_USB_IDMOUSE is not set 11.105 + 11.106 +# 11.107 +# USB ATM/DSL drivers 11.108 +# 11.109 11.110 # 11.111 # USB Gadget Support
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Tue Aug 23 18:25:51 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Tue Aug 23 18:27:22 2005 +0000
@@ -149,12 +149,12 @@ void cpu_idle (void)
 
 		if (cpu_is_offline(cpu)) {
 			local_irq_disable();
+#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
 			/* Ack it.  From this point on until
 			   we get woken up, we're not allowed
 			   to take any locks.  In particular,
 			   don't printk. */
 			__get_cpu_var(cpu_state) = CPU_DEAD;
-#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
 			/* Tell hypervisor to take vcpu down. */
 			HYPERVISOR_vcpu_down(cpu);
 #endif
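The process.c hunk only moves the #if guard: the CPU_DEAD store now sits inside the CONFIG_XEN && CONFIG_HOTPLUG_CPU block, so a build without Xen CPU hotplug no longer acknowledges an offline request it cannot actually complete. A rough sketch of the resulting idle-loop fragment, with the surrounding loop omitted (kernel identifiers as they appear in the diff; an illustration, not the full function):

	if (cpu_is_offline(cpu)) {
		local_irq_disable();
#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
		/* Ack the offline request.  From here until we are woken
		   again we must not take any locks, and must not printk. */
		__get_cpu_var(cpu_state) = CPU_DEAD;
		/* Ask the hypervisor to take this vcpu down. */
		HYPERVISOR_vcpu_down(cpu);
#endif
	}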
26.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 23 18:25:51 2005 +0000 26.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 23 18:27:22 2005 +0000 26.3 @@ -1575,19 +1575,20 @@ void __init setup_arch(char **cmdline_p) 26.4 /* Make sure we have a correctly sized P->M table. */ 26.5 if (max_pfn != xen_start_info.nr_pages) { 26.6 phys_to_machine_mapping = alloc_bootmem_low_pages( 26.7 - max_pfn * sizeof(unsigned long)); 26.8 + max_pfn * sizeof(unsigned int)); 26.9 26.10 if (max_pfn > xen_start_info.nr_pages) { 26.11 /* set to INVALID_P2M_ENTRY */ 26.12 memset(phys_to_machine_mapping, ~0, 26.13 - max_pfn * sizeof(unsigned long)); 26.14 + max_pfn * sizeof(unsigned int)); 26.15 memcpy(phys_to_machine_mapping, 26.16 - (unsigned long *)xen_start_info.mfn_list, 26.17 - xen_start_info.nr_pages * sizeof(unsigned long)); 26.18 + (unsigned int *)xen_start_info.mfn_list, 26.19 + xen_start_info.nr_pages * sizeof(unsigned int)); 26.20 } else { 26.21 memcpy(phys_to_machine_mapping, 26.22 - (unsigned long *)xen_start_info.mfn_list, 26.23 - max_pfn * sizeof(unsigned long)); 26.24 + (unsigned int *)xen_start_info.mfn_list, 26.25 + max_pfn * sizeof(unsigned int)); 26.26 + /* N.B. below relies on sizeof(int) == sizeof(long). */ 26.27 if (HYPERVISOR_dom_mem_op( 26.28 MEMOP_decrease_reservation, 26.29 (unsigned long *)xen_start_info.mfn_list + max_pfn, 26.30 @@ -1597,11 +1598,11 @@ void __init setup_arch(char **cmdline_p) 26.31 free_bootmem( 26.32 __pa(xen_start_info.mfn_list), 26.33 PFN_PHYS(PFN_UP(xen_start_info.nr_pages * 26.34 - sizeof(unsigned long)))); 26.35 + sizeof(unsigned int)))); 26.36 } 26.37 26.38 pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE); 26.39 - for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ ) 26.40 + for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned int)), j++ ) 26.41 { 26.42 pfn_to_mfn_frame_list[j] = 26.43 virt_to_mfn(&phys_to_machine_mapping[i]);
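The i386 setup.c hunk stores the P->M table as 32-bit entries and widens the stride of the pfn_to_mfn_frame_list loop to match (PAGE_SIZE / sizeof(unsigned int) pfns per slot). Below is a small, self-contained model of that frame-list walk; fake_virt_to_mfn() and the sizes are invented for illustration and are not part of the changeset:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Stand-in for the kernel's virt_to_mfn(); here we just fabricate a
 * machine frame number from the address. */
static unsigned long fake_virt_to_mfn(void *addr)
{
	return (unsigned long)addr / PAGE_SIZE;
}

int main(void)
{
	unsigned long max_pfn = 65536;               /* e.g. a 256 MiB guest */
	/* 32-bit entries, as in the patched setup.c */
	unsigned int *phys_to_machine_mapping = calloc(max_pfn, sizeof(unsigned int));
	unsigned long pfn_to_mfn_frame_list[PAGE_SIZE / sizeof(unsigned long)];
	unsigned long i, j;

	/* One frame-list slot per page of the table; with 4-byte entries
	 * each slot now covers PAGE_SIZE / 4 = 1024 pfns. */
	for (i = 0, j = 0; i < max_pfn; i += PAGE_SIZE / sizeof(unsigned int), j++)
		pfn_to_mfn_frame_list[j] =
			fake_virt_to_mfn(&phys_to_machine_mapping[i]);

	printf("registered %lu table page(s) for %lu pfns\n", j, max_pfn);
	free(phys_to_machine_mapping);
	return 0;
}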
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c	Tue Aug 23 18:25:51 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c	Tue Aug 23 18:27:22 2005 +0000
@@ -281,7 +281,7 @@ fastcall void do_page_fault(struct pt_re
 	siginfo_t info;
 
 	/* Set the "privileged fault" bit to something sane. */
-	error_code &= 3;
+	error_code &= ~4;
 	error_code |= (regs->xcs & 2) << 1;
 	if (regs->eflags & X86_EFLAGS_VM)
 		error_code |= 4;
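The fault.c change swaps `error_code &= 3` for `error_code &= ~4`: instead of throwing away everything above the low two bits, the handler now clears only the user/supervisor bit before re-deriving it from the saved CS and VM86 state, so higher hardware bits in the error code (such as the reserved-bit fault flag) survive. A minimal demonstration of the difference between the two masks (values are made up):

#include <stdio.h>

int main(void)
{
	/* An illustrative hardware error code with bits 0-3 set:
	 * present, write, user, reserved-bit violation. */
	unsigned long error_code = 0xf;

	unsigned long old_mask = error_code & 3;    /* drops bit 2 and above */
	unsigned long new_mask = error_code & ~4UL; /* clears only bit 2     */

	printf("old: %#lx (reserved-bit flag lost)\n", old_mask);
	printf("new: %#lx (bit 3 kept; bit 2 re-derived afterwards)\n", new_mask);
	return 0;
}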
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Tue Aug 23 18:25:51 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Tue Aug 23 18:27:22 2005 +0000
@@ -348,9 +348,12 @@ static void __init pagetable_init (void)
 {
 	unsigned long vaddr;
 	pgd_t *pgd_base = (pgd_t *)xen_start_info.pt_base;
+	int i;
 
 	swapper_pg_dir = pgd_base;
 	init_mm.pgd = pgd_base;
+	for (i = 0; i < NR_CPUS; i++)
+		per_cpu(cur_pgd, i) = pgd_base;
 
 	/* Enable PSE if available */
 	if (cpu_has_pse) {
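This hunk, and the matching xen_init_pt() hunk for x86_64 further down, pre-seeds the per-CPU cur_pgd pointer with the boot page directory, presumably so anything consulting cur_pgd before a CPU's first mm switch sees the boot tables rather than a NULL pointer. A toy model of the idea, using a plain array in place of the kernel's per-CPU machinery:

#include <stdio.h>

#define NR_CPUS 4

typedef struct { unsigned long val; } pgd_t;  /* toy stand-in */

static pgd_t boot_pgd = { 0x1000 };           /* pretend boot page directory */
static pgd_t *cur_pgd[NR_CPUS];               /* models per_cpu(cur_pgd, i)  */

int main(void)
{
	int i;

	/* Every CPU starts out pointing at the boot page tables, set up
	 * once during early pagetable initialisation. */
	for (i = 0; i < NR_CPUS; i++)
		cur_pgd[i] = &boot_pgd;

	for (i = 0; i < NR_CPUS; i++)
		printf("cpu%d: cur_pgd -> %#lx\n", i, cur_pgd[i]->val);
	return 0;
}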
34.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c Tue Aug 23 18:25:51 2005 +0000 34.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c Tue Aug 23 18:27:22 2005 +0000 34.3 @@ -36,6 +36,8 @@ void iounmap(volatile void __iomem *addr 34.4 { 34.5 } 34.6 34.7 +#ifdef __i386__ 34.8 + 34.9 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 34.10 { 34.11 return NULL; 34.12 @@ -45,6 +47,8 @@ void __init bt_iounmap(void *addr, unsig 34.13 { 34.14 } 34.15 34.16 +#endif /* __i386__ */ 34.17 + 34.18 #else 34.19 34.20 /* 34.21 @@ -58,7 +62,7 @@ static inline int is_local_lowmem(unsign 34.22 extern unsigned long max_low_pfn; 34.23 unsigned long mfn = address >> PAGE_SHIFT; 34.24 unsigned long pfn = mfn_to_pfn(mfn); 34.25 - return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn)); 34.26 + return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn)); 34.27 } 34.28 34.29 /* 34.30 @@ -126,10 +130,12 @@ void __iomem * __ioremap(unsigned long p 34.31 return NULL; 34.32 area->phys_addr = phys_addr; 34.33 addr = (void __iomem *) area->addr; 34.34 + flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; 34.35 +#ifdef __x86_64__ 34.36 + flags |= _PAGE_USER; 34.37 +#endif 34.38 if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr, 34.39 - size, __pgprot(_PAGE_PRESENT | _PAGE_RW | 34.40 - _PAGE_DIRTY | _PAGE_ACCESSED 34.41 - | flags), domid)) { 34.42 + size, __pgprot(flags), domid)) { 34.43 vunmap((void __force *) addr); 34.44 return NULL; 34.45 } 34.46 @@ -218,6 +224,8 @@ void iounmap(volatile void __iomem *addr 34.47 kfree(p); 34.48 } 34.49 34.50 +#ifdef __i386__ 34.51 + 34.52 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 34.53 { 34.54 unsigned long offset, last_addr; 34.55 @@ -289,6 +297,8 @@ void __init bt_iounmap(void *addr, unsig 34.56 } 34.57 } 34.58 34.59 +#endif /* __i386__ */ 34.60 + 34.61 #endif /* CONFIG_XEN_PHYSDEV_ACCESS */ 34.62 34.63 /* These hacky macros avoid phys->machine translations. */ 34.64 @@ -346,7 +356,7 @@ int direct_remap_area_pages(struct mm_st 34.65 * Fill in the machine address: PTE ptr is done later by 34.66 * __direct_remap_area_pages(). 34.67 */ 34.68 - v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot); 34.69 + v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot)); 34.70 34.71 machine_addr += PAGE_SIZE; 34.72 address += PAGE_SIZE;
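Two things change in the surviving i386 ioremap.c, which the mm/Makefile hunk below also makes x86_64 build: the protection bits are assembled into `flags` up front, with `_PAGE_USER` added only under `__x86_64__` (the 64-bit Xen guest kernel does not run in ring 0, so its mappings need the user bit), and the machine-frame PTE is built with `pfn_pte_ma()` rather than OR-ing address and pgprot by hand, presumably to keep the encoding right for both pagetable formats. A condensed sketch of the flag assembly as it ends up after the patch (constants as named in the diff):

	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
#ifdef __x86_64__
	/* Shared file: on x86_64 the guest kernel needs _PAGE_USER too. */
	flags |= _PAGE_USER;
#endif
	if (direct_remap_area_pages(&init_mm, (unsigned long)addr, phys_addr,
				    size, __pgprot(flags), domid)) {
		vunmap((void __force *)addr);
		return NULL;
	}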
38.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c Tue Aug 23 18:25:51 2005 +0000 38.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c Tue Aug 23 18:27:22 2005 +0000 38.3 @@ -40,38 +40,82 @@ EXPORT_SYMBOL(gnttab_grant_foreign_trans 38.4 EXPORT_SYMBOL(gnttab_end_foreign_transfer); 38.5 EXPORT_SYMBOL(gnttab_alloc_grant_references); 38.6 EXPORT_SYMBOL(gnttab_free_grant_references); 38.7 +EXPORT_SYMBOL(gnttab_free_grant_reference); 38.8 EXPORT_SYMBOL(gnttab_claim_grant_reference); 38.9 EXPORT_SYMBOL(gnttab_release_grant_reference); 38.10 EXPORT_SYMBOL(gnttab_grant_foreign_access_ref); 38.11 EXPORT_SYMBOL(gnttab_grant_foreign_transfer_ref); 38.12 38.13 -static grant_ref_t gnttab_free_list[NR_GRANT_ENTRIES]; 38.14 +#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t)) 38.15 +#define GNTTAB_LIST_END (NR_GRANT_ENTRIES + 1) 38.16 + 38.17 +static grant_ref_t gnttab_list[NR_GRANT_ENTRIES]; 38.18 +static int gnttab_free_count = NR_GRANT_ENTRIES; 38.19 static grant_ref_t gnttab_free_head; 38.20 +static spinlock_t gnttab_list_lock = SPIN_LOCK_UNLOCKED; 38.21 38.22 static grant_entry_t *shared; 38.23 38.24 -/* 38.25 - * Lock-free grant-entry allocator 38.26 - */ 38.27 +static struct gnttab_free_callback *gnttab_free_callback_list = NULL; 38.28 38.29 -static inline int 38.30 -get_free_entry( 38.31 - void) 38.32 +static int 38.33 +get_free_entries(int count) 38.34 { 38.35 - grant_ref_t fh, nfh = gnttab_free_head; 38.36 - do { if ( unlikely((fh = nfh) == NR_GRANT_ENTRIES) ) return -1; } 38.37 - while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh, 38.38 - gnttab_free_list[fh])) != fh) ); 38.39 - return fh; 38.40 + unsigned long flags; 38.41 + int ref; 38.42 + grant_ref_t head; 38.43 + spin_lock_irqsave(&gnttab_list_lock, flags); 38.44 + if (gnttab_free_count < count) { 38.45 + spin_unlock_irqrestore(&gnttab_list_lock, flags); 38.46 + return -1; 38.47 + } 38.48 + ref = head = gnttab_free_head; 38.49 + gnttab_free_count -= count; 38.50 + while (count-- > 1) 38.51 + head = gnttab_list[head]; 38.52 + gnttab_free_head = gnttab_list[head]; 38.53 + gnttab_list[head] = GNTTAB_LIST_END; 38.54 + spin_unlock_irqrestore(&gnttab_list_lock, flags); 38.55 + return ref; 38.56 +} 38.57 + 38.58 +#define get_free_entry() get_free_entries(1) 38.59 + 38.60 +static void 38.61 +do_free_callbacks(void) 38.62 +{ 38.63 + struct gnttab_free_callback *callback = gnttab_free_callback_list, *next; 38.64 + gnttab_free_callback_list = NULL; 38.65 + while (callback) { 38.66 + next = callback->next; 38.67 + if (gnttab_free_count >= callback->count) { 38.68 + callback->next = NULL; 38.69 + callback->fn(callback->arg); 38.70 + } else { 38.71 + callback->next = gnttab_free_callback_list; 38.72 + gnttab_free_callback_list = callback; 38.73 + } 38.74 + callback = next; 38.75 + } 38.76 } 38.77 38.78 static inline void 38.79 -put_free_entry( 38.80 - grant_ref_t ref) 38.81 +check_free_callbacks(void) 38.82 { 38.83 - grant_ref_t fh, nfh = gnttab_free_head; 38.84 - do { gnttab_free_list[ref] = fh = nfh; wmb(); } 38.85 - while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh, ref)) != fh) ); 38.86 + if (unlikely(gnttab_free_callback_list)) 38.87 + do_free_callbacks(); 38.88 +} 38.89 + 38.90 +static void 38.91 +put_free_entry(grant_ref_t ref) 38.92 +{ 38.93 + unsigned long flags; 38.94 + spin_lock_irqsave(&gnttab_list_lock, flags); 38.95 + gnttab_list[ref] = gnttab_free_head; 38.96 + gnttab_free_head = ref; 38.97 + gnttab_free_count++; 38.98 + check_free_callbacks(); 38.99 + spin_unlock_irqrestore(&gnttab_list_lock, 
flags); 38.100 } 38.101 38.102 /* 38.103 @@ -79,8 +123,7 @@ put_free_entry( 38.104 */ 38.105 38.106 int 38.107 -gnttab_grant_foreign_access( 38.108 - domid_t domid, unsigned long frame, int readonly) 38.109 +gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly) 38.110 { 38.111 int ref; 38.112 38.113 @@ -96,8 +139,8 @@ gnttab_grant_foreign_access( 38.114 } 38.115 38.116 void 38.117 -gnttab_grant_foreign_access_ref( 38.118 - grant_ref_t ref, domid_t domid, unsigned long frame, int readonly) 38.119 +gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, 38.120 + unsigned long frame, int readonly) 38.121 { 38.122 shared[ref].frame = frame; 38.123 shared[ref].domid = domid; 38.124 @@ -107,7 +150,7 @@ gnttab_grant_foreign_access_ref( 38.125 38.126 38.127 int 38.128 -gnttab_query_foreign_access( grant_ref_t ref ) 38.129 +gnttab_query_foreign_access(grant_ref_t ref) 38.130 { 38.131 u16 nflags; 38.132 38.133 @@ -117,7 +160,7 @@ gnttab_query_foreign_access( grant_ref_t 38.134 } 38.135 38.136 void 38.137 -gnttab_end_foreign_access( grant_ref_t ref, int readonly ) 38.138 +gnttab_end_foreign_access(grant_ref_t ref, int readonly) 38.139 { 38.140 u16 flags, nflags; 38.141 38.142 @@ -132,8 +175,7 @@ gnttab_end_foreign_access( grant_ref_t r 38.143 } 38.144 38.145 int 38.146 -gnttab_grant_foreign_transfer( 38.147 - domid_t domid, unsigned long pfn ) 38.148 +gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) 38.149 { 38.150 int ref; 38.151 38.152 @@ -149,8 +191,8 @@ gnttab_grant_foreign_transfer( 38.153 } 38.154 38.155 void 38.156 -gnttab_grant_foreign_transfer_ref( 38.157 - grant_ref_t ref, domid_t domid, unsigned long pfn ) 38.158 +gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, 38.159 + unsigned long pfn) 38.160 { 38.161 shared[ref].frame = pfn; 38.162 shared[ref].domid = domid; 38.163 @@ -159,8 +201,7 @@ gnttab_grant_foreign_transfer_ref( 38.164 } 38.165 38.166 unsigned long 38.167 -gnttab_end_foreign_transfer( 38.168 - grant_ref_t ref) 38.169 +gnttab_end_foreign_transfer(grant_ref_t ref) 38.170 { 38.171 unsigned long frame = 0; 38.172 u16 flags; 38.173 @@ -189,59 +230,79 @@ gnttab_end_foreign_transfer( 38.174 } 38.175 38.176 void 38.177 -gnttab_free_grant_references( u16 count, grant_ref_t head ) 38.178 +gnttab_free_grant_reference(grant_ref_t ref) 38.179 { 38.180 - /* TODO: O(N)...? 
*/ 38.181 - grant_ref_t to_die = 0, next = head; 38.182 - int i; 38.183 + 38.184 + put_free_entry(ref); 38.185 +} 38.186 38.187 - for ( i = 0; i < count; i++ ) 38.188 - { 38.189 - to_die = next; 38.190 - next = gnttab_free_list[next]; 38.191 - put_free_entry( to_die ); 38.192 +void 38.193 +gnttab_free_grant_references(grant_ref_t head) 38.194 +{ 38.195 + grant_ref_t ref; 38.196 + unsigned long flags; 38.197 + int count = 1; 38.198 + if (head == GNTTAB_LIST_END) 38.199 + return; 38.200 + spin_lock_irqsave(&gnttab_list_lock, flags); 38.201 + ref = head; 38.202 + while (gnttab_list[ref] != GNTTAB_LIST_END) { 38.203 + ref = gnttab_list[ref]; 38.204 + count++; 38.205 } 38.206 + gnttab_list[ref] = gnttab_free_head; 38.207 + gnttab_free_head = head; 38.208 + gnttab_free_count += count; 38.209 + check_free_callbacks(); 38.210 + spin_unlock_irqrestore(&gnttab_list_lock, flags); 38.211 } 38.212 38.213 int 38.214 -gnttab_alloc_grant_references( u16 count, 38.215 - grant_ref_t *head, 38.216 - grant_ref_t *terminal ) 38.217 +gnttab_alloc_grant_references(u16 count, grant_ref_t *head) 38.218 { 38.219 - int i; 38.220 - grant_ref_t h = gnttab_free_head; 38.221 + int h = get_free_entries(count); 38.222 38.223 - for ( i = 0; i < count; i++ ) 38.224 - if ( unlikely(get_free_entry() == -1) ) 38.225 - goto not_enough_refs; 38.226 + if (h == -1) 38.227 + return -ENOSPC; 38.228 38.229 *head = h; 38.230 - *terminal = gnttab_free_head; 38.231 38.232 return 0; 38.233 - 38.234 -not_enough_refs: 38.235 - gnttab_free_head = h; 38.236 - return -ENOSPC; 38.237 } 38.238 38.239 int 38.240 -gnttab_claim_grant_reference( grant_ref_t *private_head, 38.241 - grant_ref_t terminal ) 38.242 +gnttab_claim_grant_reference(grant_ref_t *private_head) 38.243 { 38.244 - grant_ref_t g; 38.245 - if ( unlikely((g = *private_head) == terminal) ) 38.246 + grant_ref_t g = *private_head; 38.247 + if (unlikely(g == GNTTAB_LIST_END)) 38.248 return -ENOSPC; 38.249 - *private_head = gnttab_free_list[g]; 38.250 + *private_head = gnttab_list[g]; 38.251 return g; 38.252 } 38.253 38.254 void 38.255 -gnttab_release_grant_reference( grant_ref_t *private_head, 38.256 - grant_ref_t release ) 38.257 +gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) 38.258 +{ 38.259 + gnttab_list[release] = *private_head; 38.260 + *private_head = release; 38.261 +} 38.262 + 38.263 +void 38.264 +gnttab_request_free_callback(struct gnttab_free_callback *callback, 38.265 + void (*fn)(void *), void *arg, u16 count) 38.266 { 38.267 - gnttab_free_list[release] = *private_head; 38.268 - *private_head = release; 38.269 + unsigned long flags; 38.270 + spin_lock_irqsave(&gnttab_list_lock, flags); 38.271 + if (callback->next) 38.272 + goto out; 38.273 + callback->fn = fn; 38.274 + callback->arg = arg; 38.275 + callback->count = count; 38.276 + callback->next = gnttab_free_callback_list; 38.277 + gnttab_free_callback_list = callback; 38.278 + check_free_callbacks(); 38.279 + out: 38.280 + spin_unlock_irqrestore(&gnttab_list_lock, flags); 38.281 } 38.282 38.283 /* 38.284 @@ -252,8 +313,9 @@ gnttab_release_grant_reference( grant_re 38.285 38.286 static struct proc_dir_entry *grant_pde; 38.287 38.288 -static int grant_ioctl(struct inode *inode, struct file *file, 38.289 - unsigned int cmd, unsigned long data) 38.290 +static int 38.291 +grant_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 38.292 + unsigned long data) 38.293 { 38.294 int ret; 38.295 privcmd_hypercall_t hypercall; 38.296 @@ -291,8 +353,9 @@ static struct file_operations 
grant_file 38.297 ioctl: grant_ioctl, 38.298 }; 38.299 38.300 -static int grant_read(char *page, char **start, off_t off, 38.301 - int count, int *eof, void *data) 38.302 +static int 38.303 +grant_read(char *page, char **start, off_t off, int count, int *eof, 38.304 + void *data) 38.305 { 38.306 int len; 38.307 unsigned int i; 38.308 @@ -321,8 +384,9 @@ static int grant_read(char *page, char * 38.309 return len; 38.310 } 38.311 38.312 -static int grant_write(struct file *file, const char __user *buffer, 38.313 - unsigned long count, void *data) 38.314 +static int 38.315 +grant_write(struct file *file, const char __user *buffer, unsigned long count, 38.316 + void *data) 38.317 { 38.318 /* TODO: implement this */ 38.319 return -ENOSYS; 38.320 @@ -330,7 +394,8 @@ static int grant_write(struct file *file 38.321 38.322 #endif /* CONFIG_PROC_FS */ 38.323 38.324 -int gnttab_resume(void) 38.325 +int 38.326 +gnttab_resume(void) 38.327 { 38.328 gnttab_setup_table_t setup; 38.329 unsigned long frames[NR_GRANT_FRAMES]; 38.330 @@ -349,7 +414,8 @@ int gnttab_resume(void) 38.331 return 0; 38.332 } 38.333 38.334 -int gnttab_suspend(void) 38.335 +int 38.336 +gnttab_suspend(void) 38.337 { 38.338 int i; 38.339 38.340 @@ -359,7 +425,8 @@ int gnttab_suspend(void) 38.341 return 0; 38.342 } 38.343 38.344 -static int __init gnttab_init(void) 38.345 +static int __init 38.346 +gnttab_init(void) 38.347 { 38.348 int i; 38.349 38.350 @@ -368,7 +435,7 @@ static int __init gnttab_init(void) 38.351 shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END); 38.352 38.353 for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) 38.354 - gnttab_free_list[i] = i + 1; 38.355 + gnttab_list[i] = i + 1; 38.356 38.357 #ifdef CONFIG_PROC_FS 38.358 /*
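The gnttab.c rewrite drops the lock-free cmpxchg allocator in favour of a free list threaded through gnttab_list[] under gnttab_list_lock, keeps a running gnttab_free_count, and lets clients queue a callback that fires once enough references become free again. The following is a self-contained userspace model of that allocator and callback scheme; it borrows identifiers from the diff but leaves out the spinlock/IRQ handling, and struct free_callback, callback_list, waiter() and main() are invented for the demonstration:

#include <stdio.h>

#define NR_GRANT_ENTRIES 8
#define GNTTAB_LIST_END  (NR_GRANT_ENTRIES + 1)

typedef unsigned int grant_ref_t;

struct free_callback {                 /* models struct gnttab_free_callback */
	struct free_callback *next;
	void (*fn)(void *);
	void *arg;
	int count;
};

static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
static int gnttab_free_count = NR_GRANT_ENTRIES;
static grant_ref_t gnttab_free_head;
static struct free_callback *callback_list;

/* Pop 'count' entries off the singly linked free list and return the head
 * reference, or -1 if not enough are free.  (The kernel holds
 * gnttab_list_lock around this; omitted here.) */
static int get_free_entries(int count)
{
	grant_ref_t ref, head;

	if (gnttab_free_count < count)
		return -1;
	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_list[head];
	gnttab_free_head = gnttab_list[head];
	gnttab_list[head] = GNTTAB_LIST_END;
	return (int)ref;
}

/* Run any queued callbacks whose demands can now be met; re-queue the rest. */
static void do_free_callbacks(void)
{
	struct free_callback *cb = callback_list, *next;

	callback_list = NULL;
	while (cb) {
		next = cb->next;
		if (gnttab_free_count >= cb->count) {
			cb->next = NULL;
			cb->fn(cb->arg);
		} else {
			cb->next = callback_list;
			callback_list = cb;
		}
		cb = next;
	}
}

static void put_free_entry(grant_ref_t ref)
{
	gnttab_list[ref] = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	if (callback_list)
		do_free_callbacks();
}

static void waiter(void *arg)
{
	printf("callback: %s can allocate again\n", (char *)arg);
}

int main(void)
{
	struct free_callback cb = { 0 };
	grant_ref_t i;
	int first, second;

	for (i = 0; i < NR_GRANT_ENTRIES; i++)   /* mirrors gnttab_init() */
		gnttab_list[i] = i + 1;

	first = get_free_entries(5);             /* takes refs 0..4 here  */
	second = get_free_entries(5);            /* fails: only 3 left    */
	printf("first=%d second=%d free=%d\n", first, second, gnttab_free_count);

	cb.fn = waiter; cb.arg = "blkfront"; cb.count = 5;
	cb.next = callback_list; callback_list = &cb;

	for (i = 0; i < 5; i++)                  /* give the batch back   */
		put_free_entry(i);
	return 0;
}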
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile	Tue Aug 23 18:25:51 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile	Tue Aug 23 18:27:22 2005 +0000
@@ -44,7 +44,7 @@ obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
 
 c-obj-$(CONFIG_MODULES) += module.o
 
-#obj-y += topology.o
+obj-y += topology.o
 c-obj-y += intel_cacheinfo.o
 
 bootflag-y += ../../../i386/kernel/bootflag.o
50.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Tue Aug 23 18:25:51 2005 +0000 50.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Tue Aug 23 18:27:22 2005 +0000 50.3 @@ -778,21 +778,21 @@ void __init setup_arch(char **cmdline_p) 50.4 /* Make sure we have a large enough P->M table. */ 50.5 if (end_pfn > xen_start_info.nr_pages) { 50.6 phys_to_machine_mapping = alloc_bootmem( 50.7 - max_pfn * sizeof(unsigned long)); 50.8 + max_pfn * sizeof(u32)); 50.9 memset(phys_to_machine_mapping, ~0, 50.10 - max_pfn * sizeof(unsigned long)); 50.11 + max_pfn * sizeof(u32)); 50.12 memcpy(phys_to_machine_mapping, 50.13 - (unsigned long *)xen_start_info.mfn_list, 50.14 - xen_start_info.nr_pages * sizeof(unsigned long)); 50.15 + (u32 *)xen_start_info.mfn_list, 50.16 + xen_start_info.nr_pages * sizeof(u32)); 50.17 free_bootmem( 50.18 __pa(xen_start_info.mfn_list), 50.19 PFN_PHYS(PFN_UP(xen_start_info.nr_pages * 50.20 - sizeof(unsigned long)))); 50.21 + sizeof(u32)))); 50.22 } 50.23 50.24 pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE); 50.25 50.26 - for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ ) 50.27 + for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(u32)), j++ ) 50.28 { 50.29 pfn_to_mfn_frame_list[j] = 50.30 virt_to_mfn(&phys_to_machine_mapping[i]);
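On x86_64 the same switch to u32 entries genuinely halves the table: unsigned long is 8 bytes on an LP64 kernel, so the bootmem allocation shrinks and each pfn_to_mfn_frame_list slot covers 1024 pfns instead of 512. A quick standalone check of the arithmetic (domain size picked arbitrarily):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long nr_pages = 262144;  /* a 1 GiB domain, for example */

	printf("P->M table, unsigned long entries: %lu KiB\n",
	       nr_pages * sizeof(unsigned long) / 1024);
	printf("P->M table, u32 entries:           %lu KiB\n",
	       nr_pages * sizeof(uint32_t) / 1024);
	printf("pfns per frame-list slot: %lu (was %lu)\n",
	       PAGE_SIZE / sizeof(uint32_t), PAGE_SIZE / sizeof(unsigned long));
	return 0;
}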
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile	Tue Aug 23 18:25:51 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile	Tue Aug 23 18:27:22 2005 +0000
@@ -6,10 +6,10 @@ XENARCH := $(subst ",,$(CONFIG_XENARCH))
 
 CFLAGS += -Iarch/$(XENARCH)/mm
 
-obj-y := init.o fault.o ioremap.o pageattr.o
+obj-y := init.o fault.o pageattr.o
 c-obj-y := extable.o
 
-i386-obj-y := hypervisor.o
+i386-obj-y := hypervisor.o ioremap.o
 
 #obj-y := init.o fault.o ioremap.o extable.o pageattr.o
 #c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Tue Aug 23 18:25:51 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Tue Aug 23 18:27:22 2005 +0000
@@ -559,6 +559,11 @@ static void xen_copy_pt(void)
 
 void __init xen_init_pt(void)
 {
+	int i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		per_cpu(cur_pgd, i) = init_mm.pgd;
+
 	memcpy((void *)init_level4_pgt,
 	       (void *)xen_start_info.pt_base, PAGE_SIZE);
 
57.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/ioremap.c Tue Aug 23 18:25:51 2005 +0000 57.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 57.3 @@ -1,499 +0,0 @@ 57.4 -/* 57.5 - * arch/x86_64/mm/ioremap.c 57.6 - * 57.7 - * Re-map IO memory to kernel address space so that we can access it. 57.8 - * This is needed for high PCI addresses that aren't mapped in the 57.9 - * 640k-1MB IO memory area on PC's 57.10 - * 57.11 - * (C) Copyright 1995 1996 Linus Torvalds 57.12 - */ 57.13 - 57.14 -#include <linux/vmalloc.h> 57.15 -#include <linux/init.h> 57.16 -#include <linux/slab.h> 57.17 -#include <linux/module.h> 57.18 -#include <asm/io.h> 57.19 -#include <asm/fixmap.h> 57.20 -#include <asm/cacheflush.h> 57.21 -#include <asm/tlbflush.h> 57.22 -#include <asm/pgtable.h> 57.23 -#include <asm/pgalloc.h> 57.24 - 57.25 -/* 57.26 - * Reuse arch/xen/i396/mm/ioremap.c. Need to merge later 57.27 - */ 57.28 -#ifndef CONFIG_XEN_PHYSDEV_ACCESS 57.29 - 57.30 -void * __ioremap(unsigned long phys_addr, unsigned long size, 57.31 - unsigned long flags) 57.32 -{ 57.33 - return NULL; 57.34 -} 57.35 - 57.36 -void *ioremap_nocache (unsigned long phys_addr, unsigned long size) 57.37 -{ 57.38 - return NULL; 57.39 -} 57.40 - 57.41 -void iounmap(volatile void __iomem *addr) 57.42 -{ 57.43 -} 57.44 - 57.45 -void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 57.46 -{ 57.47 - return NULL; 57.48 -} 57.49 - 57.50 -void __init bt_iounmap(void *addr, unsigned long size) 57.51 -{ 57.52 -} 57.53 - 57.54 -#else 57.55 - 57.56 -#if defined(__i386__) 57.57 -/* 57.58 - * Does @address reside within a non-highmem page that is local to this virtual 57.59 - * machine (i.e., not an I/O page, nor a memory page belonging to another VM). 57.60 - * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand 57.61 - * why this works. 57.62 - */ 57.63 -static inline int is_local_lowmem(unsigned long address) 57.64 -{ 57.65 - extern unsigned long max_low_pfn; 57.66 - unsigned long mfn = address >> PAGE_SHIFT; 57.67 - unsigned long pfn = mfn_to_pfn(mfn); 57.68 - return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn)); 57.69 -} 57.70 -#elif defined(__x86_64__) 57.71 -/* 57.72 - * 57.73 - */ 57.74 -static inline int is_local_lowmem(unsigned long address) 57.75 -{ 57.76 - return 0; 57.77 -} 57.78 -#endif 57.79 - 57.80 -/* 57.81 - * Generic mapping function (not visible outside): 57.82 - */ 57.83 - 57.84 -/* 57.85 - * Remap an arbitrary physical address space into the kernel virtual 57.86 - * address space. Needed when the kernel wants to access high addresses 57.87 - * directly. 57.88 - * 57.89 - * NOTE! We need to allow non-page-aligned mappings too: we will obviously 57.90 - * have to convert them into an offset in a page-aligned mapping, but the 57.91 - * caller shouldn't need to know that small detail. 57.92 - */ 57.93 -void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) 57.94 -{ 57.95 - void __iomem * addr; 57.96 - struct vm_struct * area; 57.97 - unsigned long offset, last_addr; 57.98 - domid_t domid = DOMID_IO; 57.99 - 57.100 - /* Don't allow wraparound or zero size */ 57.101 - last_addr = phys_addr + size - 1; 57.102 - if (!size || last_addr < phys_addr) 57.103 - return NULL; 57.104 - 57.105 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.106 - /* 57.107 - * Don't remap the low PCI/ISA area, it's always mapped.. 
57.108 - */ 57.109 - if (phys_addr >= 0x0 && last_addr < 0x100000) 57.110 - return isa_bus_to_virt(phys_addr); 57.111 -#endif 57.112 - 57.113 - /* 57.114 - * Don't allow anybody to remap normal RAM that we're using.. 57.115 - */ 57.116 - if (is_local_lowmem(phys_addr)) { 57.117 - char *t_addr, *t_end; 57.118 - struct page *page; 57.119 - 57.120 - t_addr = bus_to_virt(phys_addr); 57.121 - t_end = t_addr + (size - 1); 57.122 - 57.123 - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) 57.124 - if(!PageReserved(page)) 57.125 - return NULL; 57.126 - 57.127 - domid = DOMID_LOCAL; 57.128 - } 57.129 - 57.130 - /* 57.131 - * Mappings have to be page-aligned 57.132 - */ 57.133 - offset = phys_addr & ~PAGE_MASK; 57.134 - phys_addr &= PAGE_MASK; 57.135 - size = PAGE_ALIGN(last_addr+1) - phys_addr; 57.136 - 57.137 - /* 57.138 - * Ok, go for it.. 57.139 - */ 57.140 - area = get_vm_area(size, VM_IOREMAP | (flags << 20)); 57.141 - if (!area) 57.142 - return NULL; 57.143 - area->phys_addr = phys_addr; 57.144 - addr = (void __iomem *) area->addr; 57.145 - if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr, 57.146 - size, __pgprot(_PAGE_PRESENT | _PAGE_RW | 57.147 - _PAGE_DIRTY | _PAGE_ACCESSED 57.148 -#if defined(__x86_64__) 57.149 - | _PAGE_USER 57.150 -#endif 57.151 - | flags), domid)) { 57.152 - vunmap((void __force *) addr); 57.153 - return NULL; 57.154 - } 57.155 - return (void __iomem *) (offset + (char __iomem *)addr); 57.156 -} 57.157 - 57.158 - 57.159 -/** 57.160 - * ioremap_nocache - map bus memory into CPU space 57.161 - * @offset: bus address of the memory 57.162 - * @size: size of the resource to map 57.163 - * 57.164 - * ioremap_nocache performs a platform specific sequence of operations to 57.165 - * make bus memory CPU accessible via the readb/readw/readl/writeb/ 57.166 - * writew/writel functions and the other mmio helpers. The returned 57.167 - * address is not guaranteed to be usable directly as a virtual 57.168 - * address. 57.169 - * 57.170 - * This version of ioremap ensures that the memory is marked uncachable 57.171 - * on the CPU as well as honouring existing caching rules from things like 57.172 - * the PCI bus. Note that there are other caches and buffers on many 57.173 - * busses. In particular driver authors should read up on PCI writes 57.174 - * 57.175 - * It's useful if some control registers are in such an area and 57.176 - * write combining or read caching is not desirable: 57.177 - * 57.178 - * Must be freed with iounmap. 57.179 - */ 57.180 - 57.181 -void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) 57.182 -{ 57.183 - unsigned long last_addr; 57.184 - void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD); 57.185 - if (!p) 57.186 - return p; 57.187 - 57.188 - /* Guaranteed to be > phys_addr, as per __ioremap() */ 57.189 - last_addr = phys_addr + size - 1; 57.190 - 57.191 - if (is_local_lowmem(last_addr)) { 57.192 - struct page *ppage = virt_to_page(bus_to_virt(phys_addr)); 57.193 - unsigned long npages; 57.194 - 57.195 - phys_addr &= PAGE_MASK; 57.196 - 57.197 - /* This might overflow and become zero.. */ 57.198 - last_addr = PAGE_ALIGN(last_addr); 57.199 - 57.200 - /* .. but that's ok, because modulo-2**n arithmetic will make 57.201 - * the page-aligned "last - first" come out right. 
57.202 - */ 57.203 - npages = (last_addr - phys_addr) >> PAGE_SHIFT; 57.204 - 57.205 - if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 57.206 - iounmap(p); 57.207 - p = NULL; 57.208 - } 57.209 - global_flush_tlb(); 57.210 - } 57.211 - 57.212 - return p; 57.213 -} 57.214 - 57.215 -void iounmap(volatile void __iomem *addr) 57.216 -{ 57.217 - struct vm_struct *p; 57.218 - if ((void __force *) addr <= high_memory) 57.219 - return; 57.220 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.221 - if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) 57.222 - return; 57.223 -#endif 57.224 - p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr)); 57.225 - if (!p) { 57.226 - printk("__iounmap: bad address %p\n", addr); 57.227 - return; 57.228 - } 57.229 - 57.230 - if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) { 57.231 - /* p->size includes the guard page, but cpa doesn't like that */ 57.232 - change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)), 57.233 - (p->size - PAGE_SIZE) >> PAGE_SHIFT, 57.234 - PAGE_KERNEL); 57.235 - global_flush_tlb(); 57.236 - } 57.237 - kfree(p); 57.238 -} 57.239 - 57.240 -#if defined(__i386__) 57.241 -void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 57.242 -{ 57.243 - unsigned long offset, last_addr; 57.244 - unsigned int nrpages; 57.245 - enum fixed_addresses idx; 57.246 - 57.247 - /* Don't allow wraparound or zero size */ 57.248 - last_addr = phys_addr + size - 1; 57.249 - if (!size || last_addr < phys_addr) 57.250 - return NULL; 57.251 - 57.252 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.253 - /* 57.254 - * Don't remap the low PCI/ISA area, it's always mapped.. 57.255 - */ 57.256 - if (phys_addr >= 0x0 && last_addr < 0x100000) 57.257 - return isa_bus_to_virt(phys_addr); 57.258 -#endif 57.259 - 57.260 - /* 57.261 - * Mappings have to be page-aligned 57.262 - */ 57.263 - offset = phys_addr & ~PAGE_MASK; 57.264 - phys_addr &= PAGE_MASK; 57.265 - size = PAGE_ALIGN(last_addr) - phys_addr; 57.266 - 57.267 - /* 57.268 - * Mappings have to fit in the FIX_BTMAP area. 57.269 - */ 57.270 - nrpages = size >> PAGE_SHIFT; 57.271 - if (nrpages > NR_FIX_BTMAPS) 57.272 - return NULL; 57.273 - 57.274 - /* 57.275 - * Ok, go for it.. 57.276 - */ 57.277 - idx = FIX_BTMAP_BEGIN; 57.278 - while (nrpages > 0) { 57.279 - set_fixmap(idx, phys_addr); 57.280 - phys_addr += PAGE_SIZE; 57.281 - --idx; 57.282 - --nrpages; 57.283 - } 57.284 - return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN)); 57.285 -} 57.286 - 57.287 -void __init bt_iounmap(void *addr, unsigned long size) 57.288 -{ 57.289 - unsigned long virt_addr; 57.290 - unsigned long offset; 57.291 - unsigned int nrpages; 57.292 - enum fixed_addresses idx; 57.293 - 57.294 - virt_addr = (unsigned long)addr; 57.295 - if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) 57.296 - return; 57.297 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.298 - if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) 57.299 - return; 57.300 -#endif 57.301 - offset = virt_addr & ~PAGE_MASK; 57.302 - nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT; 57.303 - 57.304 - idx = FIX_BTMAP_BEGIN; 57.305 - while (nrpages > 0) { 57.306 - clear_fixmap(idx); 57.307 - --idx; 57.308 - --nrpages; 57.309 - } 57.310 -} 57.311 -#endif /* defined(__i386__) */ 57.312 - 57.313 -#endif /* CONFIG_XEN_PHYSDEV_ACCESS */ 57.314 - 57.315 -/* These hacky macros avoid phys->machine translations. 
*/ 57.316 -#define __direct_pte(x) ((pte_t) { (x) } ) 57.317 -#define __direct_mk_pte(page_nr,pgprot) \ 57.318 - __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot)) 57.319 -#define direct_mk_pte_phys(physpage, pgprot) \ 57.320 - __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot) 57.321 - 57.322 -static inline void direct_remap_area_pte(pte_t *pte, 57.323 - unsigned long address, 57.324 - unsigned long size, 57.325 - mmu_update_t **v) 57.326 -{ 57.327 - unsigned long end; 57.328 - 57.329 - address &= ~PMD_MASK; 57.330 - end = address + size; 57.331 - if (end > PMD_SIZE) 57.332 - end = PMD_SIZE; 57.333 - if (address >= end) 57.334 - BUG(); 57.335 - 57.336 - do { 57.337 - (*v)->ptr = virt_to_machine(pte); 57.338 - (*v)++; 57.339 - address += PAGE_SIZE; 57.340 - pte++; 57.341 - } while (address && (address < end)); 57.342 -} 57.343 - 57.344 -static inline int direct_remap_area_pmd(struct mm_struct *mm, 57.345 - pmd_t *pmd, 57.346 - unsigned long address, 57.347 - unsigned long size, 57.348 - mmu_update_t **v) 57.349 -{ 57.350 - unsigned long end; 57.351 - 57.352 - address &= ~PGDIR_MASK; 57.353 - end = address + size; 57.354 - if (end > PGDIR_SIZE) 57.355 - end = PGDIR_SIZE; 57.356 - if (address >= end) 57.357 - BUG(); 57.358 - do { 57.359 - pte_t *pte = (mm == &init_mm) ? 57.360 - pte_alloc_kernel(mm, pmd, address) : 57.361 - pte_alloc_map(mm, pmd, address); 57.362 - if (!pte) 57.363 - return -ENOMEM; 57.364 - direct_remap_area_pte(pte, address, end - address, v); 57.365 - pte_unmap(pte); 57.366 - address = (address + PMD_SIZE) & PMD_MASK; 57.367 - pmd++; 57.368 - } while (address && (address < end)); 57.369 - return 0; 57.370 -} 57.371 - 57.372 -int __direct_remap_area_pages(struct mm_struct *mm, 57.373 - unsigned long address, 57.374 - unsigned long size, 57.375 - mmu_update_t *v) 57.376 -{ 57.377 - pgd_t * dir; 57.378 - unsigned long end = address + size; 57.379 - int error; 57.380 - 57.381 -#if defined(__i386__) 57.382 - dir = pgd_offset(mm, address); 57.383 -#elif defined (__x86_64) 57.384 - dir = (mm == &init_mm) ? 57.385 - pgd_offset_k(address): 57.386 - pgd_offset(mm, address); 57.387 -#endif 57.388 - if (address >= end) 57.389 - BUG(); 57.390 - spin_lock(&mm->page_table_lock); 57.391 - do { 57.392 - pud_t *pud; 57.393 - pmd_t *pmd; 57.394 - 57.395 - error = -ENOMEM; 57.396 - pud = pud_alloc(mm, dir, address); 57.397 - if (!pud) 57.398 - break; 57.399 - pmd = pmd_alloc(mm, pud, address); 57.400 - if (!pmd) 57.401 - break; 57.402 - error = 0; 57.403 - direct_remap_area_pmd(mm, pmd, address, end - address, &v); 57.404 - address = (address + PGDIR_SIZE) & PGDIR_MASK; 57.405 - dir++; 57.406 - 57.407 - } while (address && (address < end)); 57.408 - spin_unlock(&mm->page_table_lock); 57.409 - return error; 57.410 -} 57.411 - 57.412 - 57.413 -int direct_remap_area_pages(struct mm_struct *mm, 57.414 - unsigned long address, 57.415 - unsigned long machine_addr, 57.416 - unsigned long size, 57.417 - pgprot_t prot, 57.418 - domid_t domid) 57.419 -{ 57.420 - int i; 57.421 - unsigned long start_address; 57.422 -#define MAX_DIRECTMAP_MMU_QUEUE 130 57.423 - mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u; 57.424 - 57.425 - start_address = address; 57.426 - 57.427 - flush_cache_all(); 57.428 - 57.429 - for (i = 0; i < size; i += PAGE_SIZE) { 57.430 - if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) { 57.431 - /* Fill in the PTE pointers. 
*/ 57.432 - __direct_remap_area_pages(mm, 57.433 - start_address, 57.434 - address-start_address, 57.435 - u); 57.436 - 57.437 - if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0) 57.438 - return -EFAULT; 57.439 - v = u; 57.440 - start_address = address; 57.441 - } 57.442 - 57.443 - /* 57.444 - * Fill in the machine address: PTE ptr is done later by 57.445 - * __direct_remap_area_pages(). 57.446 - */ 57.447 - v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot); 57.448 - 57.449 - machine_addr += PAGE_SIZE; 57.450 - address += PAGE_SIZE; 57.451 - v++; 57.452 - } 57.453 - 57.454 - if (v != u) { 57.455 - /* get the ptep's filled in */ 57.456 - __direct_remap_area_pages(mm, 57.457 - start_address, 57.458 - address-start_address, 57.459 - u); 57.460 - if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)) 57.461 - return -EFAULT; 57.462 - } 57.463 - 57.464 - flush_tlb_all(); 57.465 - 57.466 - return 0; 57.467 -} 57.468 - 57.469 -EXPORT_SYMBOL(direct_remap_area_pages); 57.470 - 57.471 -static int lookup_pte_fn( 57.472 - pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 57.473 -{ 57.474 - unsigned long *ptep = (unsigned long *)data; 57.475 - if (ptep) *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT) 57.476 - | ((unsigned long)pte & ~PAGE_MASK); 57.477 - return 0; 57.478 -} 57.479 - 57.480 -int create_lookup_pte_addr(struct mm_struct *mm, 57.481 - unsigned long address, 57.482 - unsigned long *ptep) 57.483 -{ 57.484 - return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep); 57.485 -} 57.486 - 57.487 -EXPORT_SYMBOL(create_lookup_pte_addr); 57.488 - 57.489 -static int noop_fn( 57.490 - pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 57.491 -{ 57.492 - return 0; 57.493 -} 57.494 - 57.495 -int touch_pte_range(struct mm_struct *mm, 57.496 - unsigned long address, 57.497 - unsigned long size) 57.498 -{ 57.499 - return generic_page_range(mm, address, size, noop_fn, NULL); 57.500 -} 57.501 - 57.502 -EXPORT_SYMBOL(touch_pte_range);
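The x86_64 copy of ioremap.c is deleted outright; after the mm/Makefile change the architecture builds the shared i386 file instead. Its central routine, direct_remap_area_pages(), survives there and works by batching page-table updates into a fixed-size mmu_update_t array and flushing the batch to the hypervisor whenever it fills. A standalone model of that batch-and-flush pattern; the queue length, the types and fake_mmu_update() are stand-ins, not the kernel's:

#include <stdio.h>

#define MAX_QUEUE 4                    /* the kernel batches 130 updates */
#define PAGE_SIZE 4096UL

struct update { unsigned long ptr, val; };   /* models mmu_update_t */

/* Stand-in for HYPERVISOR_mmu_update(): just report what would be sent. */
static int fake_mmu_update(const struct update *u, long count)
{
	printf("flush: %ld update(s) sent to the hypervisor\n", count);
	return 0;
}

int main(void)
{
	struct update u[MAX_QUEUE], *v = u;
	unsigned long address, machine_addr = 0x100000;

	for (address = 0; address < 10 * PAGE_SIZE; address += PAGE_SIZE) {
		if (v - u == MAX_QUEUE) {        /* queue full: flush it */
			if (fake_mmu_update(u, v - u) < 0)
				return 1;
			v = u;
		}
		/* Queue one PTE write.  The real code fills in the PTE
		 * pointer separately via __direct_remap_area_pages(). */
		v->ptr = address;
		v->val = machine_addr | 0x63;    /* frame | toy prot bits */
		machine_addr += PAGE_SIZE;
		v++;
	}
	if (v != u)                              /* flush the partial tail */
		fake_mmu_update(u, v - u);
	return 0;
}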
61.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c Tue Aug 23 18:25:51 2005 +0000 61.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c Tue Aug 23 18:27:22 2005 +0000 61.3 @@ -65,9 +65,6 @@ typedef unsigned int PEND_RING_IDX; 61.4 static PEND_RING_IDX pending_prod, pending_cons; 61.5 #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons) 61.6 61.7 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 61.8 -static kmem_cache_t *buffer_head_cachep; 61.9 -#else 61.10 static request_queue_t *plugged_queue; 61.11 static inline void flush_plugged_queue(void) 61.12 { 61.13 @@ -80,7 +77,6 @@ static inline void flush_plugged_queue(v 61.14 plugged_queue = NULL; 61.15 } 61.16 } 61.17 -#endif 61.18 61.19 /* When using grant tables to map a frame for device access then the 61.20 * handle returned must be used to unmap the frame. This is needed to 61.21 @@ -184,11 +180,7 @@ static int blkio_schedule(void *arg) 61.22 blkif_t *blkif; 61.23 struct list_head *ent; 61.24 61.25 - daemonize( 61.26 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 61.27 - "xenblkd" 61.28 -#endif 61.29 - ); 61.30 + daemonize("xenblkd"); 61.31 61.32 for ( ; ; ) 61.33 { 61.34 @@ -215,11 +207,7 @@ static int blkio_schedule(void *arg) 61.35 } 61.36 61.37 /* Push the batch through to disc. */ 61.38 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 61.39 - run_task_queue(&tq_disk); 61.40 -#else 61.41 flush_plugged_queue(); 61.42 -#endif 61.43 } 61.44 } 61.45 61.46 @@ -268,13 +256,6 @@ static void __end_block_io_op(pending_re 61.47 } 61.48 } 61.49 61.50 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 61.51 -static void end_block_io_op(struct buffer_head *bh, int uptodate) 61.52 -{ 61.53 - __end_block_io_op(bh->b_private, uptodate); 61.54 - kmem_cache_free(buffer_head_cachep, bh); 61.55 -} 61.56 -#else 61.57 static int end_block_io_op(struct bio *bio, unsigned int done, int error) 61.58 { 61.59 if ( bio->bi_size != 0 ) 61.60 @@ -283,7 +264,6 @@ static int end_block_io_op(struct bio *b 61.61 bio_put(bio); 61.62 return error; 61.63 } 61.64 -#endif 61.65 61.66 61.67 /****************************************************************************** 61.68 @@ -357,13 +337,9 @@ static void dispatch_rw_block_io(blkif_t 61.69 unsigned long buf; unsigned int nsec; 61.70 } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 61.71 unsigned int nseg; 61.72 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 61.73 - struct buffer_head *bh; 61.74 -#else 61.75 struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 61.76 int nbio = 0; 61.77 request_queue_t *q; 61.78 -#endif 61.79 61.80 /* Check that number of segments is sane. 
*/ 61.81 nseg = req->nr_segments; 61.82 @@ -435,49 +411,6 @@ static void dispatch_rw_block_io(blkif_t 61.83 pending_req->status = BLKIF_RSP_OKAY; 61.84 pending_req->nr_pages = nseg; 61.85 61.86 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 61.87 - 61.88 - atomic_set(&pending_req->pendcnt, nseg); 61.89 - pending_cons++; 61.90 - blkif_get(blkif); 61.91 - 61.92 - for ( i = 0; i < nseg; i++ ) 61.93 - { 61.94 - bh = kmem_cache_alloc(buffer_head_cachep, GFP_KERNEL); 61.95 - if ( unlikely(bh == NULL) ) 61.96 - { 61.97 - __end_block_io_op(pending_req, 0); 61.98 - continue; 61.99 - } 61.100 - 61.101 - memset(bh, 0, sizeof (struct buffer_head)); 61.102 - 61.103 - init_waitqueue_head(&bh->b_wait); 61.104 - bh->b_size = seg[i].nsec << 9; 61.105 - bh->b_dev = preq.dev; 61.106 - bh->b_rdev = preq.dev; 61.107 - bh->b_rsector = (unsigned long)preq.sector_number; 61.108 - bh->b_data = (char *)MMAP_VADDR(pending_idx, i) + 61.109 - (seg[i].buf & ~PAGE_MASK); 61.110 - bh->b_page = virt_to_page(MMAP_VADDR(pending_idx, i)); 61.111 - bh->b_end_io = end_block_io_op; 61.112 - bh->b_private = pending_req; 61.113 - 61.114 - bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | 61.115 - (1 << BH_Req) | (1 << BH_Launder); 61.116 - if ( operation == WRITE ) 61.117 - bh->b_state |= (1 << BH_JBD) | (1 << BH_Req) | (1 << BH_Uptodate); 61.118 - 61.119 - atomic_set(&bh->b_count, 1); 61.120 - 61.121 - /* Dispatch a single request. We'll flush it to disc later. */ 61.122 - generic_make_request(operation, bh); 61.123 - 61.124 - preq.sector_number += seg[i].nsec; 61.125 - } 61.126 - 61.127 -#else 61.128 - 61.129 for ( i = 0; i < nseg; i++ ) 61.130 { 61.131 if ( ((int)preq.sector_number|(int)seg[i].nsec) & 61.132 @@ -526,8 +459,6 @@ static void dispatch_rw_block_io(blkif_t 61.133 for ( i = 0; i < nbio; i++ ) 61.134 submit_bio(operation, biolist[i]); 61.135 61.136 -#endif 61.137 - 61.138 return; 61.139 61.140 bad_descriptor: 61.141 @@ -595,12 +526,6 @@ static int __init blkif_init(void) 61.142 if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 ) 61.143 BUG(); 61.144 61.145 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 61.146 - buffer_head_cachep = kmem_cache_create( 61.147 - "buffer_head_cache", sizeof(struct buffer_head), 61.148 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 61.149 -#endif 61.150 - 61.151 blkif_xenbus_init(); 61.152 61.153 memset( pending_grant_handles, BLKBACK_INVALID_HANDLE, MMAP_PAGES );
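This blkback.c hunk is a cleanup: the pre-2.6 buffer_head paths disappear, leaving the bio-based path plus the small "plugged queue" optimisation, which remembers the request queue of the last submitted bio and unplugs it as soon as a bio goes to a different queue or the batch ends. A compact model of that remember-and-flush idea in plain C, using toy types rather than the block-layer API:

#include <stdio.h>

struct queue { const char *name; };

static struct queue *plugged_queue;    /* queue of the last submitted request */

static void flush_plugged_queue(void)
{
	if (plugged_queue) {
		printf("unplug %s\n", plugged_queue->name);  /* kick the disk */
		plugged_queue = 0;
	}
}

/* Submit one request; if it targets a different queue than the previous
 * one, unplug the old queue first so its requests start immediately. */
static void submit(struct queue *q)
{
	if (q != plugged_queue) {
		flush_plugged_queue();
		plugged_queue = q;
	}
	printf("queue request on %s\n", q->name);
}

int main(void)
{
	struct queue sda = { "sda" }, sdb = { "sdb" };

	submit(&sda);
	submit(&sda);
	submit(&sdb);            /* switching queues unplugs sda */
	flush_plugged_queue();   /* end of batch: kick the last queue too */
	return 0;
}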
62.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Tue Aug 23 18:25:51 2005 +0000 62.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Tue Aug 23 18:27:22 2005 +0000 62.3 @@ -5,7 +5,6 @@ 62.4 #include <linux/config.h> 62.5 #include <linux/version.h> 62.6 #include <linux/module.h> 62.7 -#include <linux/rbtree.h> 62.8 #include <linux/interrupt.h> 62.9 #include <linux/slab.h> 62.10 #include <linux/blkdev.h> 62.11 @@ -30,12 +29,13 @@ 62.12 #define DPRINTK(_f, _a...) ((void)0) 62.13 #endif 62.14 62.15 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 62.16 -typedef struct rb_root rb_root_t; 62.17 -typedef struct rb_node rb_node_t; 62.18 -#else 62.19 -struct block_device; 62.20 -#endif 62.21 +struct vbd { 62.22 + blkif_vdev_t handle; /* what the domain refers to this vbd as */ 62.23 + unsigned char readonly; /* Non-zero -> read-only */ 62.24 + unsigned char type; /* VDISK_xxx */ 62.25 + blkif_pdev_t pdevice; /* phys device that this vbd maps to */ 62.26 + struct block_device *bdev; 62.27 +}; 62.28 62.29 typedef struct blkif_st { 62.30 /* Unique identifier for this interface. */ 62.31 @@ -48,25 +48,18 @@ typedef struct blkif_st { 62.32 /* Comms information. */ 62.33 blkif_back_ring_t blk_ring; 62.34 /* VBDs attached to this interface. */ 62.35 - rb_root_t vbd_rb; /* Mapping from 16-bit vdevices to VBDs.*/ 62.36 - spinlock_t vbd_lock; /* Protects VBD mapping. */ 62.37 + struct vbd vbd; 62.38 /* Private fields. */ 62.39 enum { DISCONNECTED, CONNECTED } status; 62.40 - /* 62.41 - * DISCONNECT response is deferred until pending requests are ack'ed. 62.42 - * We therefore need to store the id from the original request. 62.43 - */ 62.44 - u8 disconnect_rspid; 62.45 #ifdef CONFIG_XEN_BLKDEV_TAP_BE 62.46 /* Is this a blktap frontend */ 62.47 unsigned int is_blktap; 62.48 #endif 62.49 - struct blkif_st *hash_next; 62.50 struct list_head blkdev_list; 62.51 spinlock_t blk_ring_lock; 62.52 atomic_t refcnt; 62.53 62.54 - struct work_struct work; 62.55 + struct work_struct free_work; 62.56 u16 shmem_handle; 62.57 unsigned long shmem_vaddr; 62.58 grant_ref_t shmem_ref; 62.59 @@ -77,30 +70,25 @@ void blkif_destroy(blkif_be_destroy_t *d 62.60 void blkif_connect(blkif_be_connect_t *connect); 62.61 int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id); 62.62 void blkif_disconnect_complete(blkif_t *blkif); 62.63 -blkif_t *blkif_find(domid_t domid); 62.64 -void free_blkif(blkif_t *blkif); 62.65 +blkif_t *alloc_blkif(domid_t domid); 62.66 +void free_blkif_callback(blkif_t *blkif); 62.67 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn); 62.68 62.69 #define blkif_get(_b) (atomic_inc(&(_b)->refcnt)) 62.70 #define blkif_put(_b) \ 62.71 do { \ 62.72 if ( atomic_dec_and_test(&(_b)->refcnt) ) \ 62.73 - free_blkif(_b); \ 62.74 + free_blkif_callback(_b); \ 62.75 } while (0) 62.76 62.77 -struct vbd; 62.78 -void vbd_free(blkif_t *blkif, struct vbd *vbd); 62.79 - 62.80 -/* Creates inactive vbd. */ 62.81 -struct vbd *vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, blkif_pdev_t pdevice, int readonly); 62.82 -int vbd_is_active(struct vbd *vbd); 62.83 -void vbd_activate(blkif_t *blkif, struct vbd *vbd); 62.84 +/* Create a vbd. 
*/ 62.85 +int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, blkif_pdev_t pdevice, 62.86 + int readonly); 62.87 +void vbd_free(struct vbd *vbd); 62.88 62.89 unsigned long vbd_size(struct vbd *vbd); 62.90 unsigned int vbd_info(struct vbd *vbd); 62.91 unsigned long vbd_secsize(struct vbd *vbd); 62.92 -void vbd_destroy(blkif_be_vbd_destroy_t *delete); 62.93 -void destroy_all_vbds(blkif_t *blkif); 62.94 62.95 struct phys_req { 62.96 unsigned short dev;
63.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c Tue Aug 23 18:25:51 2005 +0000 63.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c Tue Aug 23 18:27:22 2005 +0000 63.3 @@ -9,27 +9,11 @@ 63.4 #include "common.h" 63.5 #include <asm-xen/evtchn.h> 63.6 63.7 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 63.8 -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) 63.9 -#endif 63.10 - 63.11 -#define BLKIF_HASHSZ 1024 63.12 -#define BLKIF_HASH(_d) (((int)(_d))&(BLKIF_HASHSZ-1)) 63.13 - 63.14 static kmem_cache_t *blkif_cachep; 63.15 -static blkif_t *blkif_hash[BLKIF_HASHSZ]; 63.16 63.17 -blkif_t *blkif_find(domid_t domid) 63.18 +blkif_t *alloc_blkif(domid_t domid) 63.19 { 63.20 - blkif_t *blkif = blkif_hash[BLKIF_HASH(domid)]; 63.21 - 63.22 - while (blkif) { 63.23 - if (blkif->domid == domid) { 63.24 - blkif_get(blkif); 63.25 - return blkif; 63.26 - } 63.27 - blkif = blkif->hash_next; 63.28 - } 63.29 + blkif_t *blkif; 63.30 63.31 blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL); 63.32 if (!blkif) 63.33 @@ -38,12 +22,9 @@ blkif_t *blkif_find(domid_t domid) 63.34 memset(blkif, 0, sizeof(*blkif)); 63.35 blkif->domid = domid; 63.36 blkif->status = DISCONNECTED; 63.37 - spin_lock_init(&blkif->vbd_lock); 63.38 spin_lock_init(&blkif->blk_ring_lock); 63.39 atomic_set(&blkif->refcnt, 1); 63.40 63.41 - blkif->hash_next = blkif_hash[BLKIF_HASH(domid)]; 63.42 - blkif_hash[BLKIF_HASH(domid)] = blkif; 63.43 return blkif; 63.44 } 63.45 63.46 @@ -55,7 +36,7 @@ static int map_frontend_page(blkif_t *bl 63.47 op.flags = GNTMAP_host_map; 63.48 op.ref = shared_page; 63.49 op.dom = blkif->domid; 63.50 - 63.51 + 63.52 BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ); 63.53 63.54 if (op.handle < 0) { 63.55 @@ -91,7 +72,7 @@ int blkif_map(blkif_t *blkif, unsigned l 63.56 if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL ) 63.57 return -ENOMEM; 63.58 63.59 - err = map_frontend_page(blkif, VMALLOC_VMADDR(vma->addr), shared_page); 63.60 + err = map_frontend_page(blkif, (unsigned long)vma->addr, shared_page); 63.61 if (err) { 63.62 vfree(vma->addr); 63.63 return err; 63.64 @@ -123,10 +104,10 @@ int blkif_map(blkif_t *blkif, unsigned l 63.65 return 0; 63.66 } 63.67 63.68 -void free_blkif(blkif_t *blkif) 63.69 +static void free_blkif(void *arg) 63.70 { 63.71 - blkif_t **pblkif; 63.72 evtchn_op_t op = { .cmd = EVTCHNOP_close }; 63.73 + blkif_t *blkif = (blkif_t *)arg; 63.74 63.75 op.u.close.port = blkif->evtchn; 63.76 op.u.close.dom = DOMID_SELF; 63.77 @@ -135,6 +116,8 @@ void free_blkif(blkif_t *blkif) 63.78 op.u.close.dom = blkif->domid; 63.79 HYPERVISOR_event_channel_op(&op); 63.80 63.81 + vbd_free(&blkif->vbd); 63.82 + 63.83 if (blkif->evtchn) 63.84 unbind_evtchn_from_irqhandler(blkif->evtchn, blkif); 63.85 63.86 @@ -143,20 +126,17 @@ void free_blkif(blkif_t *blkif) 63.87 vfree(blkif->blk_ring.sring); 63.88 } 63.89 63.90 - pblkif = &blkif_hash[BLKIF_HASH(blkif->domid)]; 63.91 - while ( *pblkif != blkif ) 63.92 - { 63.93 - BUG_ON(!*pblkif); 63.94 - pblkif = &(*pblkif)->hash_next; 63.95 - } 63.96 - *pblkif = blkif->hash_next; 63.97 - destroy_all_vbds(blkif); 63.98 kmem_cache_free(blkif_cachep, blkif); 63.99 } 63.100 63.101 +void free_blkif_callback(blkif_t *blkif) 63.102 +{ 63.103 + INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif); 63.104 + schedule_work(&blkif->free_work); 63.105 +} 63.106 + 63.107 void __init blkif_interface_init(void) 63.108 { 63.109 blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 63.110 0, 0, NULL, NULL); 63.111 - memset(blkif_hash, 
0, sizeof(blkif_hash)); 63.112 }
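interface.c no longer keeps a hash of blkifs keyed by domid (each backend now owns exactly one), and teardown is deferred: blkif_put() ends up in free_blkif_callback(), which queues the real free_blkif() on a workqueue so the event channel, ring mapping and cache object are released from process context rather than from whatever context dropped the last reference. Condensed from the hunk above (2.6-era three-argument INIT_WORK):

static void free_blkif(void *arg)
{
	blkif_t *blkif = (blkif_t *)arg;

	/* ... close the event channel, tear down the ring mapping,
	 *     then kmem_cache_free(blkif_cachep, blkif) ... */
}

void free_blkif_callback(blkif_t *blkif)
{
	/* Defer destruction to keventd, where it is safe to sleep. */
	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
	schedule_work(&blkif->free_work);
}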
64.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c Tue Aug 23 18:25:51 2005 +0000 64.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c Tue Aug 23 18:27:22 2005 +0000 64.3 @@ -3,38 +3,19 @@ 64.4 * 64.5 * Routines for managing virtual block devices (VBDs). 64.6 * 64.7 - * NOTE: vbd_lock protects updates to the rb_tree against concurrent lookups 64.8 - * in vbd_translate. All other lookups are implicitly protected because the 64.9 - * only caller (the control message dispatch routine) serializes the calls. 64.10 - * 64.11 * Copyright (c) 2003-2005, Keir Fraser & Steve Hand 64.12 */ 64.13 64.14 #include "common.h" 64.15 #include <asm-xen/xenbus.h> 64.16 64.17 -struct vbd { 64.18 - blkif_vdev_t handle; /* what the domain refers to this vbd as */ 64.19 - unsigned char readonly; /* Non-zero -> read-only */ 64.20 - unsigned char type; /* VDISK_xxx */ 64.21 - blkif_pdev_t pdevice; /* phys device that this vbd maps to */ 64.22 - struct block_device *bdev; 64.23 - 64.24 - int active; 64.25 - rb_node_t rb; /* for linking into R-B tree lookup struct */ 64.26 -}; 64.27 - 64.28 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 64.29 static inline dev_t vbd_map_devnum(blkif_pdev_t cookie) 64.30 -{ return MKDEV(cookie>>8, cookie&0xff); } 64.31 +{ 64.32 + return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie)); 64.33 +} 64.34 #define vbd_sz(_v) ((_v)->bdev->bd_part ? \ 64.35 (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity) 64.36 #define bdev_put(_b) blkdev_put(_b) 64.37 -#else 64.38 -#define vbd_sz(_v) (blk_size[MAJOR((_v)->pdevice)][MINOR((_v)->pdevice)]*2) 64.39 -#define bdev_put(_b) ((void)0) 64.40 -#define bdev_hardsect_size(_b) 512 64.41 -#endif 64.42 64.43 unsigned long vbd_size(struct vbd *vbd) 64.44 { 64.45 @@ -51,45 +32,32 @@ unsigned long vbd_secsize(struct vbd *vb 64.46 return bdev_hardsect_size(vbd->bdev); 64.47 } 64.48 64.49 -int vbd_is_active(struct vbd *vbd) 64.50 -{ 64.51 - return vbd->active; 64.52 -} 64.53 - 64.54 -struct vbd *vbd_create(blkif_t *blkif, blkif_vdev_t handle, 64.55 - blkif_pdev_t pdevice, int readonly) 64.56 +int vbd_create(blkif_t *blkif, blkif_vdev_t handle, 64.57 + blkif_pdev_t pdevice, int readonly) 64.58 { 64.59 - struct vbd *vbd; 64.60 + struct vbd *vbd; 64.61 64.62 - if ( unlikely((vbd = kmalloc(sizeof(struct vbd), GFP_KERNEL)) == NULL) ) 64.63 - { 64.64 - DPRINTK("vbd_create: out of memory\n"); 64.65 - return ERR_PTR(-ENOMEM); 64.66 - } 64.67 - 64.68 + vbd = &blkif->vbd; 64.69 vbd->handle = handle; 64.70 vbd->readonly = readonly; 64.71 vbd->type = 0; 64.72 - vbd->active = 0; 64.73 64.74 vbd->pdevice = pdevice; 64.75 64.76 - /* FIXME: Who frees vbd on failure? --RR */ 64.77 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 64.78 vbd->bdev = open_by_devnum( 64.79 vbd_map_devnum(vbd->pdevice), 64.80 vbd->readonly ? 
FMODE_READ : FMODE_WRITE); 64.81 if ( IS_ERR(vbd->bdev) ) 64.82 { 64.83 DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice); 64.84 - return ERR_PTR(-ENOENT); 64.85 + return -ENOENT; 64.86 } 64.87 64.88 if ( (vbd->bdev->bd_disk == NULL) ) 64.89 { 64.90 DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice); 64.91 - bdev_put(vbd->bdev); 64.92 - return ERR_PTR(-ENOENT); 64.93 + vbd_free(vbd); 64.94 + return -ENOENT; 64.95 } 64.96 64.97 if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD ) 64.98 @@ -97,121 +65,27 @@ struct vbd *vbd_create(blkif_t *blkif, b 64.99 if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE ) 64.100 vbd->type |= VDISK_REMOVABLE; 64.101 64.102 -#else 64.103 - if ( (blk_size[MAJOR(vbd->pdevice)] == NULL) || (vbd_sz(vbd) == 0) ) 64.104 - { 64.105 - DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice); 64.106 - return ERR_PTR(-ENOENT); 64.107 - } 64.108 -#endif 64.109 - 64.110 DPRINTK("Successful creation of handle=%04x (dom=%u)\n", 64.111 handle, blkif->domid); 64.112 - return vbd; 64.113 + return 0; 64.114 } 64.115 64.116 -void vbd_activate(blkif_t *blkif, struct vbd *vbd) 64.117 +void vbd_free(struct vbd *vbd) 64.118 { 64.119 - rb_node_t **rb_p, *rb_parent = NULL; 64.120 - struct vbd *i; 64.121 - BUG_ON(vbd_is_active(vbd)); 64.122 - 64.123 - /* Find where to put it. */ 64.124 - rb_p = &blkif->vbd_rb.rb_node; 64.125 - while ( *rb_p != NULL ) 64.126 - { 64.127 - rb_parent = *rb_p; 64.128 - i = rb_entry(rb_parent, struct vbd, rb); 64.129 - if ( vbd->handle < i->handle ) 64.130 - { 64.131 - rb_p = &rb_parent->rb_left; 64.132 - } 64.133 - else if ( vbd->handle > i->handle ) 64.134 - { 64.135 - rb_p = &rb_parent->rb_right; 64.136 - } 64.137 - else 64.138 - { 64.139 - /* We never create two of same vbd, so not possible. */ 64.140 - BUG(); 64.141 - } 64.142 - } 64.143 - 64.144 - /* Now we're active. */ 64.145 - vbd->active = 1; 64.146 - blkif_get(blkif); 64.147 - 64.148 - spin_lock(&blkif->vbd_lock); 64.149 - rb_link_node(&vbd->rb, rb_parent, rb_p); 64.150 - rb_insert_color(&vbd->rb, &blkif->vbd_rb); 64.151 - spin_unlock(&blkif->vbd_lock); 64.152 -} 64.153 - 64.154 -void vbd_free(blkif_t *blkif, struct vbd *vbd) 64.155 -{ 64.156 - if (vbd_is_active(vbd)) { 64.157 - spin_lock(&blkif->vbd_lock); 64.158 - rb_erase(&vbd->rb, &blkif->vbd_rb); 64.159 - spin_unlock(&blkif->vbd_lock); 64.160 - blkif_put(blkif); 64.161 - } 64.162 - bdev_put(vbd->bdev); 64.163 - kfree(vbd); 64.164 -} 64.165 - 64.166 -void destroy_all_vbds(blkif_t *blkif) 64.167 -{ 64.168 - struct vbd *vbd; 64.169 - rb_node_t *rb; 64.170 - 64.171 - spin_lock(&blkif->vbd_lock); 64.172 - 64.173 - while ( (rb = blkif->vbd_rb.rb_node) != NULL ) 64.174 - { 64.175 - vbd = rb_entry(rb, struct vbd, rb); 64.176 - rb_erase(rb, &blkif->vbd_rb); 64.177 - spin_unlock(&blkif->vbd_lock); 64.178 - bdev_put(vbd->bdev); 64.179 - kfree(vbd); 64.180 - spin_lock(&blkif->vbd_lock); 64.181 - blkif_put(blkif); 64.182 - } 64.183 - 64.184 - spin_unlock(&blkif->vbd_lock); 64.185 + if (vbd->bdev) 64.186 + bdev_put(vbd->bdev); 64.187 + vbd->bdev = NULL; 64.188 } 64.189 64.190 int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation) 64.191 { 64.192 - struct vbd *vbd; 64.193 - rb_node_t *rb; 64.194 - int rc = -EACCES; 64.195 - 64.196 - /* Take the vbd_lock because another thread could be updating the tree. 
*/ 64.197 - spin_lock(&blkif->vbd_lock); 64.198 + struct vbd *vbd = &blkif->vbd; 64.199 + int rc = -EACCES; 64.200 64.201 - rb = blkif->vbd_rb.rb_node; 64.202 - while ( rb != NULL ) 64.203 - { 64.204 - vbd = rb_entry(rb, struct vbd, rb); 64.205 - if ( req->dev < vbd->handle ) 64.206 - rb = rb->rb_left; 64.207 - else if ( req->dev > vbd->handle ) 64.208 - rb = rb->rb_right; 64.209 - else 64.210 - goto found; 64.211 - } 64.212 - 64.213 - DPRINTK("vbd_translate; domain %u attempted to access " 64.214 - "non-existent VBD.\n", blkif->domid); 64.215 - rc = -ENODEV; 64.216 - goto out; 64.217 - 64.218 - found: 64.219 - 64.220 - if ( (operation == WRITE) && vbd->readonly ) 64.221 + if ((operation == WRITE) && vbd->readonly) 64.222 goto out; 64.223 64.224 - if ( unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)) ) 64.225 + if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd))) 64.226 goto out; 64.227 64.228 req->dev = vbd->pdevice; 64.229 @@ -219,6 +93,5 @@ int vbd_translate(struct phys_req *req, 64.230 rc = 0; 64.231 64.232 out: 64.233 - spin_unlock(&blkif->vbd_lock); 64.234 return rc; 64.235 }
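The blkback hunk above drops the per-interface red-black tree of VBDs: vbd_create() now fills in &blkif->vbd directly, vbd_translate() no longer needs a lookup or the vbd_lock, and activation/teardown collapse into vbd_free(). The struct vbd definition removed from vbd.c presumably moves into blkback's common.h (not shown in this excerpt); a minimal sketch of the assumed layout after the change, with the rb-tree linkage and 'active' flag gone:

    /* Sketch only -- assumed post-change definition, expected to live in
     * drivers/xen/blkback/common.h rather than vbd.c. */
    struct vbd {
        blkif_vdev_t         handle;    /* what the domain refers to this vbd as */
        unsigned char        readonly;  /* Non-zero -> read-only */
        unsigned char        type;      /* VDISK_xxx */
        blkif_pdev_t         pdevice;   /* phys device that this vbd maps to */
        struct block_device *bdev;      /* opened by vbd_create(), NULL after vbd_free() */
    };

    /* blkif_t is assumed to gain an embedded member, so each interface
     * carries exactly one virtual disk:
     *     struct vbd vbd;
     * which is why vbd_create() can simply do 'vbd = &blkif->vbd;'. */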
65.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Tue Aug 23 18:25:51 2005 +0000 65.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Tue Aug 23 18:27:22 2005 +0000 65.3 @@ -26,7 +26,6 @@ struct backend_info 65.4 65.5 /* our communications channel */ 65.6 blkif_t *blkif; 65.7 - struct vbd *vbd; 65.8 65.9 long int frontend_id; 65.10 long int pdev; 65.11 @@ -47,8 +46,6 @@ static int blkback_remove(struct xenbus_ 65.12 if (be->watch.node) 65.13 unregister_xenbus_watch(&be->watch); 65.14 unregister_xenbus_watch(&be->backend_watch); 65.15 - if (be->vbd) 65.16 - vbd_free(be->blkif, be->vbd); 65.17 if (be->blkif) 65.18 blkif_put(be->blkif); 65.19 if (be->frontpath) 65.20 @@ -72,7 +69,7 @@ static void frontend_changed(struct xenb 65.21 device_unregister(&be->dev->dev); 65.22 return; 65.23 } 65.24 - if (vbd_is_active(be->vbd)) 65.25 + if (be->blkif->status == CONNECTED) 65.26 return; 65.27 65.28 err = xenbus_gather(be->frontpath, "grant-id", "%lu", &sharedmfn, 65.29 @@ -85,9 +82,8 @@ static void frontend_changed(struct xenb 65.30 } 65.31 65.32 /* Domains must use same shared frame for all vbds. */ 65.33 - if (be->blkif->status == CONNECTED && 65.34 - (evtchn != be->blkif->remote_evtchn || 65.35 - sharedmfn != be->blkif->shmem_frame)) { 65.36 + if (evtchn != be->blkif->remote_evtchn || 65.37 + sharedmfn != be->blkif->shmem_frame) { 65.38 xenbus_dev_error(be->dev, err, 65.39 "Shared frame/evtchn %li/%u not same as" 65.40 " old %li/%u", 65.41 @@ -105,7 +101,7 @@ static void frontend_changed(struct xenb 65.42 } 65.43 65.44 err = xenbus_printf(be->dev->nodename, "sectors", "%lu", 65.45 - vbd_size(be->vbd)); 65.46 + vbd_size(&be->blkif->vbd)); 65.47 if (err) { 65.48 xenbus_dev_error(be->dev, err, "writing %s/sectors", 65.49 be->dev->nodename); 65.50 @@ -114,34 +110,29 @@ static void frontend_changed(struct xenb 65.51 65.52 /* FIXME: use a typename instead */ 65.53 err = xenbus_printf(be->dev->nodename, "info", "%u", 65.54 - vbd_info(be->vbd)); 65.55 + vbd_info(&be->blkif->vbd)); 65.56 if (err) { 65.57 xenbus_dev_error(be->dev, err, "writing %s/info", 65.58 be->dev->nodename); 65.59 goto abort; 65.60 } 65.61 err = xenbus_printf(be->dev->nodename, "sector-size", "%lu", 65.62 - vbd_secsize(be->vbd)); 65.63 + vbd_secsize(&be->blkif->vbd)); 65.64 if (err) { 65.65 xenbus_dev_error(be->dev, err, "writing %s/sector-size", 65.66 be->dev->nodename); 65.67 goto abort; 65.68 } 65.69 65.70 - /* First vbd? We need to map the shared frame, irq etc. */ 65.71 - if (be->blkif->status != CONNECTED) { 65.72 - err = blkif_map(be->blkif, sharedmfn, evtchn); 65.73 - if (err) { 65.74 - xenbus_dev_error(be->dev, err, 65.75 - "mapping shared-frame %lu port %u", 65.76 - sharedmfn, evtchn); 65.77 - goto abort; 65.78 - } 65.79 + /* Map the shared frame, irq etc. */ 65.80 + err = blkif_map(be->blkif, sharedmfn, evtchn); 65.81 + if (err) { 65.82 + xenbus_dev_error(be->dev, err, 65.83 + "mapping shared-frame %lu port %u", 65.84 + sharedmfn, evtchn); 65.85 + goto abort; 65.86 } 65.87 65.88 - /* We're ready, activate. 
*/ 65.89 - vbd_activate(be->blkif, be->vbd); 65.90 - 65.91 xenbus_transaction_end(0); 65.92 xenbus_dev_ok(be->dev); 65.93 65.94 @@ -228,20 +219,16 @@ static void backend_changed(struct xenbu 65.95 p = strrchr(be->frontpath, '/') + 1; 65.96 handle = simple_strtoul(p, NULL, 0); 65.97 65.98 - be->blkif = blkif_find(be->frontend_id); 65.99 + be->blkif = alloc_blkif(be->frontend_id); 65.100 if (IS_ERR(be->blkif)) { 65.101 err = PTR_ERR(be->blkif); 65.102 be->blkif = NULL; 65.103 goto device_fail; 65.104 } 65.105 65.106 - be->vbd = vbd_create(be->blkif, handle, be->pdev, 65.107 - be->readonly); 65.108 - if (IS_ERR(be->vbd)) { 65.109 - err = PTR_ERR(be->vbd); 65.110 - be->vbd = NULL; 65.111 + err = vbd_create(be->blkif, handle, be->pdev, be->readonly); 65.112 + if (err) 65.113 goto device_fail; 65.114 - } 65.115 65.116 frontend_changed(&be->watch, be->frontpath); 65.117 }
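With the VBD embedded in the interface, backend_info no longer tracks a separate vbd pointer and the xenbus backend's setup path gets shorter. A condensed sketch of the resulting flow in backend_changed(), using only names that appear in the hunks above (error handling abbreviated, so treat it as illustrative rather than the exact committed code):

    be->blkif = alloc_blkif(be->frontend_id);      /* was blkif_find() */
    if (IS_ERR(be->blkif)) {
        err = PTR_ERR(be->blkif);
        be->blkif = NULL;
        goto device_fail;
    }

    /* vbd_create() now returns an errno and initialises blkif->vbd in
     * place, so there is no separately allocated vbd to free on failure. */
    err = vbd_create(be->blkif, handle, be->pdev, be->readonly);
    if (err)
        goto device_fail;

    frontend_changed(&be->watch, be->frontpath);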
66.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Tue Aug 23 18:25:51 2005 +0000 66.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Tue Aug 23 18:27:22 2005 +0000 66.3 @@ -63,25 +63,16 @@ typedef unsigned char byte; /* from linu 66.4 /* Control whether runtime update of vbds is enabled. */ 66.5 #define ENABLE_VBD_UPDATE 1 66.6 66.7 -#define BLKIF_STATE_CLOSED 0 66.8 -#define BLKIF_STATE_DISCONNECTED 1 66.9 -#define BLKIF_STATE_CONNECTED 2 66.10 +#define BLKIF_STATE_DISCONNECTED 0 66.11 +#define BLKIF_STATE_CONNECTED 1 66.12 66.13 -static unsigned int blkif_state = BLKIF_STATE_CLOSED; 66.14 -static unsigned int blkif_evtchn = 0; 66.15 -static unsigned int blkif_vbds = 0; 66.16 -static unsigned int blkif_vbds_connected = 0; 66.17 - 66.18 -static blkif_front_ring_t blk_ring; 66.19 +static unsigned int blkif_state = BLKIF_STATE_DISCONNECTED; 66.20 66.21 #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) 66.22 66.23 -static domid_t rdomid = 0; 66.24 -static grant_ref_t gref_head, gref_terminal; 66.25 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ 66.26 (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE) 66.27 #define GRANTREF_INVALID (1<<15) 66.28 -static int shmem_ref; 66.29 66.30 static struct blk_shadow { 66.31 blkif_request_t req; 66.32 @@ -92,7 +83,7 @@ unsigned long blk_shadow_free; 66.33 66.34 static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */ 66.35 66.36 -static void kick_pending_request_queues(void); 66.37 +static void kick_pending_request_queues(struct blkfront_info *info); 66.38 66.39 static int __init xlblk_init(void); 66.40 66.41 @@ -119,7 +110,7 @@ static inline void ADD_ID_TO_FREELIST(un 66.42 66.43 /* Kernel-specific definitions used in the common code */ 66.44 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 66.45 -#define DISABLE_SCATTERGATHER() 66.46 +#define DISABLE_SCATTERGATHER() 66.47 #else 66.48 static int sg_operation = -1; 66.49 #define DISABLE_SCATTERGATHER() (sg_operation = -1) 66.50 @@ -138,11 +129,11 @@ static inline void unpickle_request(blki 66.51 } 66.52 66.53 66.54 -static inline void flush_requests(void) 66.55 +static inline void flush_requests(struct blkfront_info *info) 66.56 { 66.57 DISABLE_SCATTERGATHER(); 66.58 - RING_PUSH_REQUESTS(&blk_ring); 66.59 - notify_via_evtchn(blkif_evtchn); 66.60 + RING_PUSH_REQUESTS(&info->ring); 66.61 + notify_via_evtchn(info->evtchn); 66.62 } 66.63 66.64 66.65 @@ -152,30 +143,39 @@ static inline void flush_requests(void) 66.66 66.67 module_init(xlblk_init); 66.68 66.69 -static struct xlbd_disk_info *head_waiting = NULL; 66.70 -static void kick_pending_request_queues(void) 66.71 +static void kick_pending_request_queues(struct blkfront_info *info) 66.72 { 66.73 - struct xlbd_disk_info *di; 66.74 - while ( ((di = head_waiting) != NULL) && !RING_FULL(&blk_ring) ) 66.75 - { 66.76 - head_waiting = di->next_waiting; 66.77 - di->next_waiting = NULL; 66.78 - /* Re-enable calldowns. */ 66.79 - blk_start_queue(di->rq); 66.80 - /* Kick things off immediately. */ 66.81 - do_blkif_request(di->rq); 66.82 - } 66.83 + if (!RING_FULL(&info->ring)) { 66.84 + /* Re-enable calldowns. */ 66.85 + blk_start_queue(info->rq); 66.86 + /* Kick things off immediately. 
*/ 66.87 + do_blkif_request(info->rq); 66.88 + } 66.89 +} 66.90 + 66.91 +static void blkif_restart_queue(void *arg) 66.92 +{ 66.93 + struct blkfront_info *info = (struct blkfront_info *)arg; 66.94 + spin_lock_irq(&blkif_io_lock); 66.95 + kick_pending_request_queues(info); 66.96 + spin_unlock_irq(&blkif_io_lock); 66.97 +} 66.98 + 66.99 +static void blkif_restart_queue_callback(void *arg) 66.100 +{ 66.101 + struct blkfront_info *info = (struct blkfront_info *)arg; 66.102 + schedule_work(&info->work); 66.103 } 66.104 66.105 int blkif_open(struct inode *inode, struct file *filep) 66.106 { 66.107 - struct gendisk *gd = inode->i_bdev->bd_disk; 66.108 - struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data; 66.109 + // struct gendisk *gd = inode->i_bdev->bd_disk; 66.110 + // struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data; 66.111 66.112 - /* Update of usage count is protected by per-device semaphore. */ 66.113 - di->mi->usage++; 66.114 - 66.115 - return 0; 66.116 + /* Update of usage count is protected by per-device semaphore. */ 66.117 + // di->mi->usage++; 66.118 + 66.119 + return 0; 66.120 } 66.121 66.122 66.123 @@ -192,8 +192,8 @@ int blkif_ioctl(struct inode *inode, str 66.124 int i; 66.125 66.126 DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", 66.127 - command, (long)argument, inode->i_rdev); 66.128 - 66.129 + command, (long)argument, inode->i_rdev); 66.130 + 66.131 switch ( command ) 66.132 { 66.133 case HDIO_GETGEO: 66.134 @@ -219,7 +219,7 @@ int blkif_ioctl(struct inode *inode, str 66.135 /* 66.136 * blkif_queue_request 66.137 * 66.138 - * request block io 66.139 + * request block io 66.140 * 66.141 * id: for guest use only. 66.142 * operation: BLKIF_OP_{READ,WRITE,PROBE} 66.143 @@ -228,7 +228,7 @@ int blkif_ioctl(struct inode *inode, str 66.144 */ 66.145 static int blkif_queue_request(struct request *req) 66.146 { 66.147 - struct xlbd_disk_info *di = req->rq_disk->private_data; 66.148 + struct blkfront_info *info = req->rq_disk->private_data; 66.149 unsigned long buffer_ma; 66.150 blkif_request_t *ring_req; 66.151 struct bio *bio; 66.152 @@ -237,20 +237,28 @@ static int blkif_queue_request(struct re 66.153 unsigned long id; 66.154 unsigned int fsect, lsect; 66.155 int ref; 66.156 + grant_ref_t gref_head; 66.157 66.158 - if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) ) 66.159 + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) 66.160 return 1; 66.161 66.162 + if (gnttab_alloc_grant_references(BLKIF_MAX_SEGMENTS_PER_REQUEST, 66.163 + &gref_head) < 0) { 66.164 + gnttab_request_free_callback(&info->callback, 66.165 + blkif_restart_queue_callback, info, 66.166 + BLKIF_MAX_SEGMENTS_PER_REQUEST); 66.167 + return 1; 66.168 + } 66.169 + 66.170 /* Fill out a communications ring structure. */ 66.171 - ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt); 66.172 + ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); 66.173 id = GET_ID_FROM_FREELIST(); 66.174 blk_shadow[id].request = (unsigned long)req; 66.175 66.176 ring_req->id = id; 66.177 - ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : 66.178 - BLKIF_OP_READ; 66.179 + ring_req->operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; 66.180 ring_req->sector_number = (blkif_sector_t)req->sector; 66.181 - ring_req->handle = di->handle; 66.182 + ring_req->handle = info->handle; 66.183 66.184 ring_req->nr_segments = 0; 66.185 rq_for_each_bio(bio, req) 66.186 @@ -263,56 +271,61 @@ static int blkif_queue_request(struct re 66.187 fsect = bvec->bv_offset >> 9; 66.188 lsect = fsect + (bvec->bv_len >> 9) - 1; 66.189 /* install a grant reference. */ 66.190 - ref = gnttab_claim_grant_reference(&gref_head, gref_terminal); 66.191 + ref = gnttab_claim_grant_reference(&gref_head); 66.192 ASSERT( ref != -ENOSPC ); 66.193 66.194 gnttab_grant_foreign_access_ref( 66.195 ref, 66.196 - rdomid, 66.197 + info->backend_id, 66.198 buffer_ma >> PAGE_SHIFT, 66.199 rq_data_dir(req) ); 66.200 66.201 blk_shadow[id].frame[ring_req->nr_segments] = 66.202 buffer_ma >> PAGE_SHIFT; 66.203 66.204 - ring_req->frame_and_sects[ring_req->nr_segments++] = 66.205 + ring_req->frame_and_sects[ring_req->nr_segments] = 66.206 blkif_fas_from_gref(ref, fsect, lsect); 66.207 + 66.208 + ring_req->nr_segments++; 66.209 } 66.210 } 66.211 66.212 - blk_ring.req_prod_pvt++; 66.213 - 66.214 + info->ring.req_prod_pvt++; 66.215 + 66.216 /* Keep a private copy so we can reissue requests when recovering. */ 66.217 pickle_request(&blk_shadow[id], ring_req); 66.218 66.219 + gnttab_free_grant_references(gref_head); 66.220 + 66.221 return 0; 66.222 } 66.223 66.224 - 66.225 /* 66.226 * do_blkif_request 66.227 * read a block; request is in a request queue 66.228 */ 66.229 void do_blkif_request(request_queue_t *rq) 66.230 { 66.231 - struct xlbd_disk_info *di; 66.232 + struct blkfront_info *info = NULL; 66.233 struct request *req; 66.234 int queued; 66.235 66.236 - DPRINTK("Entered do_blkif_request\n"); 66.237 + DPRINTK("Entered do_blkif_request\n"); 66.238 66.239 queued = 0; 66.240 66.241 while ( (req = elv_next_request(rq)) != NULL ) 66.242 { 66.243 + info = req->rq_disk->private_data; 66.244 + 66.245 if ( !blk_fs_request(req) ) 66.246 { 66.247 end_request(req, 0); 66.248 continue; 66.249 } 66.250 66.251 - if ( RING_FULL(&blk_ring) ) 66.252 - goto wait; 66.253 + if (RING_FULL(&info->ring)) 66.254 + goto wait; 66.255 66.256 DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n", 66.257 req, req->cmd, req->sector, req->current_nr_sectors, 66.258 @@ -320,25 +333,19 @@ void do_blkif_request(request_queue_t *r 66.259 rq_data_dir(req) ? "write" : "read"); 66.260 66.261 blkdev_dequeue_request(req); 66.262 - if ( blkif_queue_request(req) ) 66.263 - { 66.264 + if (blkif_queue_request(req)) { 66.265 + blk_requeue_request(rq, req); 66.266 wait: 66.267 - di = req->rq_disk->private_data; 66.268 - if ( di->next_waiting == NULL ) 66.269 - { 66.270 - di->next_waiting = head_waiting; 66.271 - head_waiting = di; 66.272 - /* Avoid pointless unplugs. */ 66.273 - blk_stop_queue(rq); 66.274 - } 66.275 - break; 66.276 + /* Avoid pointless unplugs. 
*/ 66.277 + blk_stop_queue(rq); 66.278 + break; 66.279 } 66.280 66.281 queued++; 66.282 } 66.283 66.284 if ( queued != 0 ) 66.285 - flush_requests(); 66.286 + flush_requests(info); 66.287 } 66.288 66.289 66.290 @@ -347,25 +354,24 @@ static irqreturn_t blkif_int(int irq, vo 66.291 struct request *req; 66.292 blkif_response_t *bret; 66.293 RING_IDX i, rp; 66.294 - unsigned long flags; 66.295 - 66.296 - spin_lock_irqsave(&blkif_io_lock, flags); 66.297 + unsigned long flags; 66.298 + struct blkfront_info *info = (struct blkfront_info *)dev_id; 66.299 66.300 - if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) || 66.301 - unlikely(recovery) ) 66.302 - { 66.303 + spin_lock_irqsave(&blkif_io_lock, flags); 66.304 + 66.305 + if (unlikely(info->connected != BLKIF_STATE_CONNECTED || recovery)) { 66.306 spin_unlock_irqrestore(&blkif_io_lock, flags); 66.307 return IRQ_HANDLED; 66.308 } 66.309 - 66.310 - rp = blk_ring.sring->rsp_prod; 66.311 + 66.312 + rp = info->ring.sring->rsp_prod; 66.313 rmb(); /* Ensure we see queued responses up to 'rp'. */ 66.314 66.315 - for ( i = blk_ring.rsp_cons; i != rp; i++ ) 66.316 + for ( i = info->ring.rsp_cons; i != rp; i++ ) 66.317 { 66.318 unsigned long id; 66.319 66.320 - bret = RING_GET_RESPONSE(&blk_ring, i); 66.321 + bret = RING_GET_RESPONSE(&info->ring, i); 66.322 id = bret->id; 66.323 req = (struct request *)blk_shadow[id].request; 66.324 66.325 @@ -382,7 +388,7 @@ static irqreturn_t blkif_int(int irq, vo 66.326 bret->status); 66.327 66.328 if ( unlikely(end_that_request_first 66.329 - (req, 66.330 + (req, 66.331 (bret->status == BLKIF_RSP_OKAY), 66.332 req->hard_nr_sectors)) ) 66.333 BUG(); 66.334 @@ -394,9 +400,9 @@ static irqreturn_t blkif_int(int irq, vo 66.335 } 66.336 } 66.337 66.338 - blk_ring.rsp_cons = i; 66.339 + info->ring.rsp_cons = i; 66.340 66.341 - kick_pending_request_queues(); 66.342 + kick_pending_request_queues(info); 66.343 66.344 spin_unlock_irqrestore(&blkif_io_lock, flags); 66.345 66.346 @@ -425,31 +431,31 @@ static int nr_pending; 66.347 static void kick_pending_request_queues(void) 66.348 { 66.349 /* We kick pending request queues if the ring is reasonably empty. */ 66.350 - if ( (nr_pending != 0) && 66.351 - (RING_PENDING_REQUESTS(&blk_ring) < (BLK_RING_SIZE >> 1)) ) 66.352 + if ( (nr_pending != 0) && 66.353 + (RING_PENDING_REQUESTS(&info->ring) < (BLK_RING_SIZE >> 1)) ) 66.354 { 66.355 /* Attempt to drain the queue, but bail if the ring becomes full. */ 66.356 - while ( (nr_pending != 0) && !RING_FULL(&blk_ring) ) 66.357 + while ( (nr_pending != 0) && !RING_FULL(&info->ring) ) 66.358 do_blkif_request(pending_queues[--nr_pending]); 66.359 } 66.360 } 66.361 66.362 int blkif_open(struct inode *inode, struct file *filep) 66.363 { 66.364 - short xldev = inode->i_rdev; 66.365 + short xldev = inode->i_rdev; 66.366 struct gendisk *gd = get_gendisk(xldev); 66.367 xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev); 66.368 - short minor = MINOR(xldev); 66.369 + short minor = MINOR(xldev); 66.370 66.371 if ( gd->part[minor].nr_sects == 0 ) 66.372 - { 66.373 + { 66.374 /* 66.375 * Device either doesn't exist, or has zero capacity; we use a few 66.376 * cheesy heuristics to return the relevant error code 66.377 */ 66.378 if ( (gd->sizes[minor >> gd->minor_shift] != 0) || 66.379 ((minor & (gd->max_p - 1)) != 0) ) 66.380 - { 66.381 + { 66.382 /* 66.383 * We have a real device, but no such partition, or we just have a 66.384 * partition number so guess this is the problem. 
66.385 @@ -458,16 +464,16 @@ int blkif_open(struct inode *inode, stru 66.386 } 66.387 else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE ) 66.388 { 66.389 - /* This is a removable device => assume that media is missing. */ 66.390 + /* This is a removable device => assume that media is missing. */ 66.391 return -ENOMEDIUM; /* media not present (this is a guess) */ 66.392 - } 66.393 + } 66.394 else 66.395 - { 66.396 + { 66.397 /* Just go for the general 'no such device' error. */ 66.398 return -ENODEV; /* no such device */ 66.399 } 66.400 } 66.401 - 66.402 + 66.403 /* Update of usage count is protected by per-device semaphore. */ 66.404 disk->usage++; 66.405 66.406 @@ -496,24 +502,24 @@ int blkif_ioctl(struct inode *inode, str 66.407 { 66.408 kdev_t dev = inode->i_rdev; 66.409 struct hd_geometry *geo = (struct hd_geometry *)argument; 66.410 - struct gendisk *gd; 66.411 - struct hd_struct *part; 66.412 + struct gendisk *gd; 66.413 + struct hd_struct *part; 66.414 int i; 66.415 unsigned short cylinders; 66.416 byte heads, sectors; 66.417 66.418 /* NB. No need to check permissions. That is done for us. */ 66.419 - 66.420 + 66.421 DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", 66.422 - command, (long) argument, dev); 66.423 - 66.424 + command, (long) argument, dev); 66.425 + 66.426 gd = get_gendisk(dev); 66.427 - part = &gd->part[MINOR(dev)]; 66.428 + part = &gd->part[MINOR(dev)]; 66.429 66.430 switch ( command ) 66.431 { 66.432 case BLKGETSIZE: 66.433 - DPRINTK_IOCTL(" BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 66.434 + DPRINTK_IOCTL(" BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 66.435 return put_user(part->nr_sects, (unsigned long *) argument); 66.436 66.437 case BLKGETSIZE64: 66.438 @@ -526,7 +532,7 @@ int blkif_ioctl(struct inode *inode, str 66.439 return blkif_revalidate(dev); 66.440 66.441 case BLKSSZGET: 66.442 - return hardsect_size[MAJOR(dev)][MINOR(dev)]; 66.443 + return hardsect_size[MAJOR(dev)][MINOR(dev)]; 66.444 66.445 case BLKBSZGET: /* get block size */ 66.446 DPRINTK_IOCTL(" BLKBSZGET: %x\n", BLKBSZGET); 66.447 @@ -552,7 +558,7 @@ int blkif_ioctl(struct inode *inode, str 66.448 values consistent with the size of the device */ 66.449 66.450 heads = 0xff; 66.451 - sectors = 0x3f; 66.452 + sectors = 0x3f; 66.453 cylinders = part->nr_sects / (heads * sectors); 66.454 66.455 if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT; 66.456 @@ -562,7 +568,7 @@ int blkif_ioctl(struct inode *inode, str 66.457 66.458 return 0; 66.459 66.460 - case HDIO_GETGEO_BIG: 66.461 + case HDIO_GETGEO_BIG: 66.462 DPRINTK_IOCTL(" HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG); 66.463 if (!argument) return -EINVAL; 66.464 66.465 @@ -570,7 +576,7 @@ int blkif_ioctl(struct inode *inode, str 66.466 values consistent with the size of the device */ 66.467 66.468 heads = 0xff; 66.469 - sectors = 0x3f; 66.470 + sectors = 0x3f; 66.471 cylinders = part->nr_sects / (heads * sectors); 66.472 66.473 if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT; 66.474 @@ -594,7 +600,7 @@ int blkif_ioctl(struct inode *inode, str 66.475 WPRINTK("ioctl %08x not supported by XL blkif\n", command); 66.476 return -ENOSYS; 66.477 } 66.478 - 66.479 + 66.480 return 0; 66.481 } 66.482 66.483 @@ -614,7 +620,7 @@ int blkif_revalidate(kdev_t dev) 66.484 xl_disk_t *disk; 66.485 unsigned long capacity; 66.486 int i, rc = 0; 66.487 - 66.488 + 66.489 if ( (bd = bdget(dev)) == NULL ) 66.490 return -EINVAL; 66.491 66.492 @@ -662,7 +668,7 @@ int blkif_revalidate(kdev_t dev) 
66.493 /* 66.494 * blkif_queue_request 66.495 * 66.496 - * request block io 66.497 + * request block io 66.498 * 66.499 * id: for guest use only. 66.500 * operation: BLKIF_OP_{READ,WRITE,PROBE} 66.501 @@ -696,7 +702,7 @@ static int blkif_queue_request(unsigned 66.502 66.503 buffer_ma &= PAGE_MASK; 66.504 66.505 - if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) ) 66.506 + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) 66.507 return 1; 66.508 66.509 switch ( operation ) 66.510 @@ -704,7 +710,7 @@ static int blkif_queue_request(unsigned 66.511 66.512 case BLKIF_OP_READ: 66.513 case BLKIF_OP_WRITE: 66.514 - gd = get_gendisk(device); 66.515 + gd = get_gendisk(device); 66.516 66.517 /* 66.518 * Update the sector_number we'll pass down as appropriate; note that 66.519 @@ -714,10 +720,10 @@ static int blkif_queue_request(unsigned 66.520 sector_number += gd->part[MINOR(device)].start_sect; 66.521 66.522 /* 66.523 - * If this unit doesn't consist of virtual partitions then we clear 66.524 + * If this unit doesn't consist of virtual partitions then we clear 66.525 * the partn bits from the device number. 66.526 */ 66.527 - if ( !(gd->flags[MINOR(device)>>gd->minor_shift] & 66.528 + if ( !(gd->flags[MINOR(device)>>gd->minor_shift] & 66.529 GENHD_FL_VIRT_PARTNS) ) 66.530 device &= ~(gd->max_p - 1); 66.531 66.532 @@ -725,20 +731,20 @@ static int blkif_queue_request(unsigned 66.533 (sg_dev == device) && 66.534 (sg_next_sect == sector_number) ) 66.535 { 66.536 - req = RING_GET_REQUEST(&blk_ring, 66.537 - blk_ring.req_prod_pvt - 1); 66.538 + req = RING_GET_REQUEST(&info->ring, 66.539 + info->ring.req_prod_pvt - 1); 66.540 bh = (struct buffer_head *)id; 66.541 - 66.542 + 66.543 bh->b_reqnext = (struct buffer_head *)blk_shadow[req->id].request; 66.544 blk_shadow[req->id].request = (unsigned long)id; 66.545 66.546 /* install a grant reference. */ 66.547 - ref = gnttab_claim_grant_reference(&gref_head, gref_terminal); 66.548 + ref = gnttab_claim_grant_reference(&gref_head); 66.549 ASSERT( ref != -ENOSPC ); 66.550 66.551 gnttab_grant_foreign_access_ref( 66.552 ref, 66.553 - rdomid, 66.554 + info->backend_id, 66.555 buffer_ma >> PAGE_SHIFT, 66.556 ( operation == BLKIF_OP_WRITE ? 1 : 0 ) ); 66.557 66.558 @@ -757,7 +763,7 @@ static int blkif_queue_request(unsigned 66.559 66.560 return 0; 66.561 } 66.562 - else if ( RING_FULL(&blk_ring) ) 66.563 + else if ( RING_FULL(&info->ring) ) 66.564 { 66.565 return 1; 66.566 } 66.567 @@ -774,7 +780,7 @@ static int blkif_queue_request(unsigned 66.568 } 66.569 66.570 /* Fill out a communications ring structure. */ 66.571 - req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt); 66.572 + req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); 66.573 66.574 xid = GET_ID_FROM_FREELIST(); 66.575 blk_shadow[xid].request = (unsigned long)id; 66.576 @@ -782,15 +788,15 @@ static int blkif_queue_request(unsigned 66.577 req->id = xid; 66.578 req->operation = operation; 66.579 req->sector_number = (blkif_sector_t)sector_number; 66.580 - req->handle = handle; 66.581 + req->handle = handle; 66.582 req->nr_segments = 1; 66.583 /* install a grant reference. */ 66.584 - ref = gnttab_claim_grant_reference(&gref_head, gref_terminal); 66.585 + ref = gnttab_claim_grant_reference(&gref_head); 66.586 ASSERT( ref != -ENOSPC ); 66.587 66.588 gnttab_grant_foreign_access_ref( 66.589 ref, 66.590 - rdomid, 66.591 + info->backend_id, 66.592 buffer_ma >> PAGE_SHIFT, 66.593 ( operation == BLKIF_OP_WRITE ? 
1 : 0 ) ); 66.594 66.595 @@ -798,11 +804,11 @@ static int blkif_queue_request(unsigned 66.596 66.597 req->frame_and_sects[0] = blkif_fas_from_gref(ref, fsect, lsect); 66.598 66.599 - /* Keep a private copy so we can reissue requests when recovering. */ 66.600 + /* Keep a private copy so we can reissue requests when recovering. */ 66.601 pickle_request(&blk_shadow[xid], req); 66.602 66.603 - blk_ring.req_prod_pvt++; 66.604 - 66.605 + info->ring.req_prod_pvt++; 66.606 + 66.607 return 0; 66.608 } 66.609 66.610 @@ -817,13 +823,13 @@ void do_blkif_request(request_queue_t *r 66.611 struct buffer_head *bh, *next_bh; 66.612 int rw, nsect, full, queued = 0; 66.613 66.614 - DPRINTK("Entered do_blkif_request\n"); 66.615 + DPRINTK("Entered do_blkif_request\n"); 66.616 66.617 while ( !rq->plugged && !list_empty(&rq->queue_head)) 66.618 { 66.619 - if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 66.620 + if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 66.621 goto out; 66.622 - 66.623 + 66.624 DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n", 66.625 req, req->cmd, req->sector, 66.626 req->current_nr_sectors, req->nr_sectors, req->bh); 66.627 @@ -844,16 +850,16 @@ void do_blkif_request(request_queue_t *r 66.628 66.629 full = blkif_queue_request( 66.630 (unsigned long)bh, 66.631 - (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE, 66.632 + (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE, 66.633 bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev); 66.634 66.635 if ( full ) 66.636 - { 66.637 + { 66.638 bh->b_reqnext = next_bh; 66.639 pending_queues[nr_pending++] = rq; 66.640 if ( unlikely(nr_pending >= MAX_PENDING) ) 66.641 BUG(); 66.642 - goto out; 66.643 + goto out; 66.644 } 66.645 66.646 queued++; 66.647 @@ -861,7 +867,7 @@ void do_blkif_request(request_queue_t *r 66.648 /* Dequeue the buffer head from the request. */ 66.649 nsect = bh->b_size >> 9; 66.650 bh = req->bh = next_bh; 66.651 - 66.652 + 66.653 if ( bh != NULL ) 66.654 { 66.655 /* There's another buffer head to do. Update the request. */ 66.656 @@ -891,27 +897,27 @@ void do_blkif_request(request_queue_t *r 66.657 66.658 static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) 66.659 { 66.660 - RING_IDX i, rp; 66.661 - unsigned long flags; 66.662 + RING_IDX i, rp; 66.663 + unsigned long flags; 66.664 struct buffer_head *bh, *next_bh; 66.665 - 66.666 - spin_lock_irqsave(&io_request_lock, flags); 66.667 66.668 - if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) ) 66.669 + spin_lock_irqsave(&io_request_lock, flags); 66.670 + 66.671 + if ( unlikely(info->connected != BLKIF_STATE_CONNECTED || recovery) ) 66.672 { 66.673 spin_unlock_irqrestore(&io_request_lock, flags); 66.674 return; 66.675 } 66.676 66.677 - rp = blk_ring.sring->rsp_prod; 66.678 + rp = info->ring.sring->rsp_prod; 66.679 rmb(); /* Ensure we see queued responses up to 'rp'. 
*/ 66.680 66.681 - for ( i = blk_ring.rsp_cons; i != rp; i++ ) 66.682 + for ( i = info->ring.rsp_cons; i != rp; i++ ) 66.683 { 66.684 unsigned long id; 66.685 blkif_response_t *bret; 66.686 - 66.687 - bret = RING_GET_RESPONSE(&blk_ring, i); 66.688 + 66.689 + bret = RING_GET_RESPONSE(&info->ring, i); 66.690 id = bret->id; 66.691 bh = (struct buffer_head *)blk_shadow[id].request; 66.692 66.693 @@ -943,8 +949,8 @@ static void blkif_int(int irq, void *dev 66.694 } 66.695 66.696 } 66.697 - blk_ring.rsp_cons = i; 66.698 - 66.699 + info->ring.rsp_cons = i; 66.700 + 66.701 kick_pending_request_queues(); 66.702 66.703 spin_unlock_irqrestore(&io_request_lock, flags); 66.704 @@ -954,24 +960,24 @@ static void blkif_int(int irq, void *dev 66.705 66.706 /***************************** COMMON CODE *******************************/ 66.707 66.708 -static void blkif_free(void) 66.709 +static void blkif_free(struct blkfront_info *info) 66.710 { 66.711 /* Prevent new requests being issued until we fix things up. */ 66.712 spin_lock_irq(&blkif_io_lock); 66.713 - blkif_state = BLKIF_STATE_DISCONNECTED; 66.714 + info->connected = BLKIF_STATE_DISCONNECTED; 66.715 spin_unlock_irq(&blkif_io_lock); 66.716 66.717 /* Free resources associated with old device channel. */ 66.718 - if ( blk_ring.sring != NULL ) 66.719 + if ( info->ring.sring != NULL ) 66.720 { 66.721 - free_page((unsigned long)blk_ring.sring); 66.722 - blk_ring.sring = NULL; 66.723 + free_page((unsigned long)info->ring.sring); 66.724 + info->ring.sring = NULL; 66.725 } 66.726 - unbind_evtchn_from_irqhandler(blkif_evtchn, NULL); 66.727 - blkif_evtchn = 0; 66.728 + unbind_evtchn_from_irqhandler(info->evtchn, NULL); 66.729 + info->evtchn = 0; 66.730 } 66.731 66.732 -static void blkif_recover(void) 66.733 +static void blkif_recover(struct blkfront_info *info) 66.734 { 66.735 int i; 66.736 blkif_request_t *req; 66.737 @@ -987,7 +993,7 @@ static void blkif_recover(void) 66.738 memset(&blk_shadow, 0, sizeof(blk_shadow)); 66.739 for ( i = 0; i < BLK_RING_SIZE; i++ ) 66.740 blk_shadow[i].req.id = i+1; 66.741 - blk_shadow_free = blk_ring.req_prod_pvt; 66.742 + blk_shadow_free = info->ring.req_prod_pvt; 66.743 blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; 66.744 66.745 /* Stage 3: Find pending requests and requeue them. */ 66.746 @@ -999,7 +1005,7 @@ static void blkif_recover(void) 66.747 66.748 /* Grab a request slot and unpickle shadow state into it. */ 66.749 req = RING_GET_REQUEST( 66.750 - &blk_ring, blk_ring.req_prod_pvt); 66.751 + &info->ring, info->ring.req_prod_pvt); 66.752 unpickle_request(req, &copy[i]); 66.753 66.754 /* We get a new request id, and must reset the shadow state. */ 66.755 @@ -1012,7 +1018,7 @@ static void blkif_recover(void) 66.756 if ( req->frame_and_sects[j] & GRANTREF_INVALID ) 66.757 gnttab_grant_foreign_access_ref( 66.758 blkif_gref_from_fas(req->frame_and_sects[j]), 66.759 - rdomid, 66.760 + info->backend_id, 66.761 blk_shadow[req->id].frame[j], 66.762 rq_data_dir((struct request *) 66.763 blk_shadow[req->id].request)); 66.764 @@ -1020,32 +1026,31 @@ static void blkif_recover(void) 66.765 } 66.766 blk_shadow[req->id].req = *req; 66.767 66.768 - blk_ring.req_prod_pvt++; 66.769 + info->ring.req_prod_pvt++; 66.770 } 66.771 66.772 kfree(copy); 66.773 66.774 recovery = 0; 66.775 66.776 - /* blk_ring->req_prod will be set when we flush_requests().*/ 66.777 + /* info->ring->req_prod will be set when we flush_requests().*/ 66.778 wmb(); 66.779 66.780 /* Kicks things back into life. 
*/ 66.781 - flush_requests(); 66.782 + flush_requests(info); 66.783 66.784 /* Now safe to left other people use the interface. */ 66.785 - blkif_state = BLKIF_STATE_CONNECTED; 66.786 + info->connected = BLKIF_STATE_CONNECTED; 66.787 } 66.788 66.789 -static void blkif_connect(u16 evtchn, domid_t domid) 66.790 +static void blkif_connect(struct blkfront_info *info, u16 evtchn) 66.791 { 66.792 int err = 0; 66.793 66.794 - blkif_evtchn = evtchn; 66.795 - rdomid = domid; 66.796 + info->evtchn = evtchn; 66.797 66.798 err = bind_evtchn_to_irqhandler( 66.799 - blkif_evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL); 66.800 + info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info); 66.801 if ( err != 0 ) 66.802 { 66.803 WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err); 66.804 @@ -1059,17 +1064,6 @@ static struct xenbus_device_id blkfront_ 66.805 { "" } 66.806 }; 66.807 66.808 -struct blkfront_info 66.809 -{ 66.810 - /* We watch the backend */ 66.811 - struct xenbus_watch watch; 66.812 - int vdevice; 66.813 - u16 handle; 66.814 - int connected; 66.815 - struct xenbus_device *dev; 66.816 - char *backend; 66.817 -}; 66.818 - 66.819 static void watch_for_status(struct xenbus_watch *watch, const char *node) 66.820 { 66.821 struct blkfront_info *info; 66.822 @@ -1081,35 +1075,33 @@ static void watch_for_status(struct xenb 66.823 node += strlen(watch->node); 66.824 66.825 /* FIXME: clean up when error on the other end. */ 66.826 - if (info->connected) 66.827 + if (info->connected == BLKIF_STATE_CONNECTED) 66.828 return; 66.829 66.830 - err = xenbus_gather(watch->node, 66.831 + err = xenbus_gather(watch->node, 66.832 "sectors", "%lu", §ors, 66.833 "info", "%u", &binfo, 66.834 "sector-size", "%lu", §or_size, 66.835 NULL); 66.836 if (err) { 66.837 - xenbus_dev_error(info->dev, err, "reading backend fields"); 66.838 + xenbus_dev_error(info->xbdev, err, "reading backend fields"); 66.839 return; 66.840 } 66.841 66.842 - xlvbd_add(sectors, info->vdevice, info->handle, binfo, sector_size); 66.843 - info->connected = 1; 66.844 + xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); 66.845 + info->connected = BLKIF_STATE_CONNECTED; 66.846 66.847 - /* First to connect? blkif is now connected. */ 66.848 - if (blkif_vbds_connected++ == 0) 66.849 - blkif_state = BLKIF_STATE_CONNECTED; 66.850 + blkif_state = BLKIF_STATE_CONNECTED; 66.851 66.852 - xenbus_dev_ok(info->dev); 66.853 + xenbus_dev_ok(info->xbdev); 66.854 66.855 /* Kick pending requests. 
*/ 66.856 spin_lock_irq(&blkif_io_lock); 66.857 - kick_pending_request_queues(); 66.858 + kick_pending_request_queues(info); 66.859 spin_unlock_irq(&blkif_io_lock); 66.860 } 66.861 66.862 -static int setup_blkring(struct xenbus_device *dev, unsigned int backend_id) 66.863 +static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) 66.864 { 66.865 blkif_sring_t *sring; 66.866 evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound }; 66.867 @@ -1121,25 +1113,28 @@ static int setup_blkring(struct xenbus_d 66.868 return -ENOMEM; 66.869 } 66.870 SHARED_RING_INIT(sring); 66.871 - FRONT_RING_INIT(&blk_ring, sring, PAGE_SIZE); 66.872 + FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 66.873 66.874 - shmem_ref = gnttab_claim_grant_reference(&gref_head, 66.875 - gref_terminal); 66.876 - ASSERT(shmem_ref != -ENOSPC); 66.877 - gnttab_grant_foreign_access_ref(shmem_ref, 66.878 - backend_id, 66.879 - virt_to_mfn(blk_ring.sring), 66.880 - 0); 66.881 + err = gnttab_grant_foreign_access(info->backend_id, 66.882 + virt_to_mfn(info->ring.sring), 0); 66.883 + if (err == -ENOSPC) { 66.884 + free_page((unsigned long)info->ring.sring); 66.885 + info->ring.sring = 0; 66.886 + xenbus_dev_error(dev, err, "granting access to ring page"); 66.887 + return err; 66.888 + } 66.889 + info->grant_id = err; 66.890 66.891 - op.u.alloc_unbound.dom = backend_id; 66.892 + op.u.alloc_unbound.dom = info->backend_id; 66.893 err = HYPERVISOR_event_channel_op(&op); 66.894 if (err) { 66.895 - free_page((unsigned long)blk_ring.sring); 66.896 - blk_ring.sring = 0; 66.897 + gnttab_end_foreign_access(info->grant_id, 0); 66.898 + free_page((unsigned long)info->ring.sring); 66.899 + info->ring.sring = 0; 66.900 xenbus_dev_error(dev, err, "allocating event channel"); 66.901 return err; 66.902 } 66.903 - blkif_connect(op.u.alloc_unbound.port, backend_id); 66.904 + blkif_connect(info, op.u.alloc_unbound.port); 66.905 return 0; 66.906 } 66.907 66.908 @@ -1149,11 +1144,11 @@ static int talk_to_backend(struct xenbus 66.909 { 66.910 char *backend; 66.911 const char *message; 66.912 - int err, backend_id; 66.913 + int err; 66.914 66.915 backend = NULL; 66.916 err = xenbus_gather(dev->nodename, 66.917 - "backend-id", "%i", &backend_id, 66.918 + "backend-id", "%i", &info->backend_id, 66.919 "backend", NULL, &backend, 66.920 NULL); 66.921 if (XENBUS_EXIST_ERR(err)) 66.922 @@ -1168,12 +1163,10 @@ static int talk_to_backend(struct xenbus 66.923 goto out; 66.924 } 66.925 66.926 - /* First device? We create shared ring, alloc event channel. */ 66.927 - if (blkif_vbds == 0) { 66.928 - err = setup_blkring(dev, backend_id); 66.929 - if (err) 66.930 - goto out; 66.931 - } 66.932 + /* Create shared ring, alloc event channel. 
*/ 66.933 + err = setup_blkring(dev, info); 66.934 + if (err) 66.935 + goto out; 66.936 66.937 err = xenbus_transaction_start(dev->nodename); 66.938 if (err) { 66.939 @@ -1181,13 +1174,13 @@ static int talk_to_backend(struct xenbus 66.940 goto destroy_blkring; 66.941 } 66.942 66.943 - err = xenbus_printf(dev->nodename, "grant-id","%u", shmem_ref); 66.944 + err = xenbus_printf(dev->nodename, "grant-id","%u", info->grant_id); 66.945 if (err) { 66.946 message = "writing grant-id"; 66.947 goto abort_transaction; 66.948 } 66.949 err = xenbus_printf(dev->nodename, 66.950 - "event-channel", "%u", blkif_evtchn); 66.951 + "event-channel", "%u", info->evtchn); 66.952 if (err) { 66.953 message = "writing event-channel"; 66.954 goto abort_transaction; 66.955 @@ -1220,8 +1213,7 @@ static int talk_to_backend(struct xenbus 66.956 /* Have to do this *outside* transaction. */ 66.957 xenbus_dev_error(dev, err, "%s", message); 66.958 destroy_blkring: 66.959 - if (blkif_vbds == 0) 66.960 - blkif_free(); 66.961 + blkif_free(info); 66.962 goto out; 66.963 } 66.964 66.965 @@ -1250,9 +1242,11 @@ static int blkfront_probe(struct xenbus_ 66.966 xenbus_dev_error(dev, err, "allocating info structure"); 66.967 return err; 66.968 } 66.969 - info->dev = dev; 66.970 + info->xbdev = dev; 66.971 info->vdevice = vdevice; 66.972 - info->connected = 0; 66.973 + info->connected = BLKIF_STATE_DISCONNECTED; 66.974 + info->mi = NULL; 66.975 + INIT_WORK(&info->work, blkif_restart_queue, (void *)info); 66.976 66.977 /* Front end dir is a number, which is used as the id. */ 66.978 info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); 66.979 @@ -1266,7 +1260,6 @@ static int blkfront_probe(struct xenbus_ 66.980 66.981 /* Call once in case entries already there. */ 66.982 watch_for_status(&info->watch, info->watch.node); 66.983 - blkif_vbds++; 66.984 return 0; 66.985 } 66.986 66.987 @@ -1277,16 +1270,14 @@ static int blkfront_remove(struct xenbus 66.988 if (info->backend) 66.989 unregister_xenbus_watch(&info->watch); 66.990 66.991 - if (info->connected) { 66.992 - xlvbd_del(info->handle); 66.993 - blkif_vbds_connected--; 66.994 - } 66.995 + if (info->mi) 66.996 + xlvbd_del(info); 66.997 + 66.998 + blkif_free(info); 66.999 + 66.1000 kfree(info->backend); 66.1001 kfree(info); 66.1002 66.1003 - if (--blkif_vbds == 0) 66.1004 - blkif_free(); 66.1005 - 66.1006 return 0; 66.1007 } 66.1008 66.1009 @@ -1298,10 +1289,8 @@ static int blkfront_suspend(struct xenbu 66.1010 kfree(info->backend); 66.1011 info->backend = NULL; 66.1012 66.1013 - if (--blkif_vbds == 0) { 66.1014 - recovery = 1; 66.1015 - blkif_free(); 66.1016 - } 66.1017 + recovery = 1; 66.1018 + blkif_free(info); 66.1019 66.1020 return 0; 66.1021 } 66.1022 @@ -1314,8 +1303,7 @@ static int blkfront_resume(struct xenbus 66.1023 /* FIXME: Check geometry hasn't changed here... */ 66.1024 err = talk_to_backend(dev, info); 66.1025 if (!err) { 66.1026 - if (blkif_vbds++ == 0) 66.1027 - blkif_recover(); 66.1028 + blkif_recover(info); 66.1029 } 66.1030 return err; 66.1031 } 66.1032 @@ -1363,11 +1351,6 @@ static int __init xlblk_init(void) 66.1033 { 66.1034 int i; 66.1035 66.1036 - /* A grant for every ring slot, plus one for the ring itself. 
*/ 66.1037 - if (gnttab_alloc_grant_references(MAXIMUM_OUTSTANDING_BLOCK_REQS + 1, 66.1038 - &gref_head, &gref_terminal) < 0) 66.1039 - return 1; 66.1040 - 66.1041 if ( (xen_start_info.flags & SIF_INITDOMAIN) || 66.1042 (xen_start_info.flags & SIF_BLK_BE_DOMAIN) ) 66.1043 return 0; 66.1044 @@ -1391,6 +1374,6 @@ static void blkif_completion(struct blk_ 66.1045 { 66.1046 int i; 66.1047 for ( i = 0; i < s->req.nr_segments; i++ ) 66.1048 - gnttab_release_grant_reference( 66.1049 - &gref_head, blkif_gref_from_fas(s->req.frame_and_sects[i])); 66.1050 + gnttab_free_grant_reference( 66.1051 + blkif_gref_from_fas(s->req.frame_and_sects[i])); 66.1052 }
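A structural point worth drawing out of the blkfront.c diff: the driver no longer reserves one global pool of grant references at module load (the MAXIMUM_OUTSTANDING_BLOCK_REQS + 1 allocation removed from xlblk_init above). Instead, blkif_queue_request() reserves BLKIF_MAX_SEGMENTS_PER_REQUEST references per request and, if the grant table is momentarily exhausted, registers a free callback that restarts the queue from a workqueue. A rough sketch of that back-off path, assuming the gnttab_* interfaces behave exactly as the hunks above imply:

    /* Illustrative sketch, not verbatim code from the changeset. */
    static int queue_request_sketch(struct blkfront_info *info, struct request *req)
    {
        grant_ref_t gref_head;

        if (gnttab_alloc_grant_references(BLKIF_MAX_SEGMENTS_PER_REQUEST,
                                          &gref_head) < 0) {
            /* No references free right now: ask to be called back once
             * enough are released.  The callback only schedules
             * info->work, whose handler (blkif_restart_queue) re-kicks
             * the request queue under blkif_io_lock. */
            gnttab_request_free_callback(&info->callback,
                                         blkif_restart_queue_callback, info,
                                         BLKIF_MAX_SEGMENTS_PER_REQUEST);
            return 1;   /* do_blkif_request() requeues and stops the queue */
        }

        /* ... claim one reference per segment with
         *     gnttab_claim_grant_reference(&gref_head), grant the backend
         *     access to each data frame, and build the ring entry ... */

        gnttab_free_grant_references(gref_head);    /* return unused refs */
        return 0;
    }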
67.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Tue Aug 23 18:25:51 2005 +0000 67.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Tue Aug 23 18:27:22 2005 +0000 67.3 @@ -46,6 +46,7 @@ 67.4 #include <linux/major.h> 67.5 #include <linux/devfs_fs_kernel.h> 67.6 #include <asm-xen/hypervisor.h> 67.7 +#include <asm-xen/xenbus.h> 67.8 #include <asm-xen/xen-public/xen.h> 67.9 #include <asm-xen/xen-public/io/blkif.h> 67.10 #include <asm-xen/xen-public/io/ring.h> 67.11 @@ -79,11 +80,20 @@ 67.12 #define DPRINTK_IOCTL(_f, _a...) ((void)0) 67.13 #endif 67.14 67.15 -struct xlbd_type_info { 67.16 - int partn_shift; 67.17 - int disks_per_major; 67.18 - char *devname; 67.19 - char *diskname; 67.20 +struct xlbd_type_info 67.21 +{ 67.22 + int partn_shift; 67.23 + int disks_per_major; 67.24 + char *devname; 67.25 + char *diskname; 67.26 +}; 67.27 + 67.28 +struct xlbd_major_info 67.29 +{ 67.30 + int major; 67.31 + int index; 67.32 + int usage; 67.33 + struct xlbd_type_info *type; 67.34 }; 67.35 67.36 /* 67.37 @@ -91,27 +101,28 @@ struct xlbd_type_info { 67.38 * hang in private_data off the gendisk structure. We may end up 67.39 * putting all kinds of interesting stuff here :-) 67.40 */ 67.41 -struct xlbd_major_info { 67.42 - int major; 67.43 - int index; 67.44 - int usage; 67.45 - struct xlbd_type_info *type; 67.46 +struct blkfront_info 67.47 +{ 67.48 + struct xenbus_device *xbdev; 67.49 + /* We watch the backend */ 67.50 + struct xenbus_watch watch; 67.51 + dev_t dev; 67.52 + int vdevice; 67.53 + blkif_vdev_t handle; 67.54 + int connected; 67.55 + char *backend; 67.56 + int backend_id; 67.57 + int grant_id; 67.58 + blkif_front_ring_t ring; 67.59 + unsigned int evtchn; 67.60 + struct xlbd_major_info *mi; 67.61 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 67.62 + request_queue_t *rq; 67.63 +#endif 67.64 + struct work_struct work; 67.65 + struct gnttab_free_callback callback; 67.66 }; 67.67 67.68 -struct xlbd_disk_info { 67.69 - int xd_device; 67.70 - blkif_vdev_t handle; 67.71 - struct xlbd_major_info *mi; 67.72 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 67.73 - struct xlbd_disk_info *next_waiting; 67.74 - request_queue_t *rq; 67.75 -#endif 67.76 -}; 67.77 - 67.78 -typedef struct xen_block { 67.79 - int usage; 67.80 -} xen_block_t; 67.81 - 67.82 extern spinlock_t blkif_io_lock; 67.83 67.84 extern int blkif_open(struct inode *inode, struct file *filep); 67.85 @@ -123,7 +134,7 @@ extern int blkif_revalidate(dev_t dev); 67.86 extern void do_blkif_request (request_queue_t *rq); 67.87 67.88 /* Virtual block-device subsystem. */ 67.89 -int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle, 67.90 - u16 info, u16 sector_size); 67.91 -void xlvbd_del(blkif_vdev_t handle); 67.92 +int xlvbd_add(blkif_sector_t capacity, int device, 67.93 + u16 vdisk_info, u16 sector_size, struct blkfront_info *info); 67.94 +void xlvbd_del(struct blkfront_info *info); 67.95 #endif /* __XEN_DRIVERS_BLOCK_H__ */
68.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Tue Aug 23 18:25:51 2005 +0000 68.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Tue Aug 23 18:27:22 2005 +0000 68.3 @@ -43,325 +43,269 @@ 68.4 #define NUM_SCSI_MAJORS 9 68.5 #define NUM_VBD_MAJORS 1 68.6 68.7 -struct lvdisk 68.8 -{ 68.9 - blkif_sector_t capacity; /* 0: Size in terms of 512-byte sectors. */ 68.10 - blkif_vdev_t handle; /* 8: Device number (opaque 16 bit value). */ 68.11 - u16 info; 68.12 - dev_t dev; 68.13 - struct list_head list; 68.14 -}; 68.15 - 68.16 static struct xlbd_type_info xlbd_ide_type = { 68.17 - .partn_shift = 6, 68.18 - .disks_per_major = 2, 68.19 - .devname = "ide", 68.20 - .diskname = "hd", 68.21 + .partn_shift = 6, 68.22 + .disks_per_major = 2, 68.23 + .devname = "ide", 68.24 + .diskname = "hd", 68.25 }; 68.26 68.27 static struct xlbd_type_info xlbd_scsi_type = { 68.28 - .partn_shift = 4, 68.29 - .disks_per_major = 16, 68.30 - .devname = "sd", 68.31 - .diskname = "sd", 68.32 + .partn_shift = 4, 68.33 + .disks_per_major = 16, 68.34 + .devname = "sd", 68.35 + .diskname = "sd", 68.36 }; 68.37 68.38 static struct xlbd_type_info xlbd_vbd_type = { 68.39 - .partn_shift = 4, 68.40 - .disks_per_major = 16, 68.41 - .devname = "xvd", 68.42 - .diskname = "xvd", 68.43 + .partn_shift = 4, 68.44 + .disks_per_major = 16, 68.45 + .devname = "xvd", 68.46 + .diskname = "xvd", 68.47 }; 68.48 68.49 static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS + 68.50 - NUM_VBD_MAJORS]; 68.51 + NUM_VBD_MAJORS]; 68.52 68.53 -#define XLBD_MAJOR_IDE_START 0 68.54 -#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS) 68.55 -#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS) 68.56 +#define XLBD_MAJOR_IDE_START 0 68.57 +#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS) 68.58 +#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS) 68.59 68.60 -#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1 68.61 -#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1 68.62 -#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1 68.63 +#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1 68.64 +#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1 68.65 +#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1 68.66 68.67 /* Information about our VBDs. 
*/ 68.68 #define MAX_VBDS 64 68.69 static LIST_HEAD(vbds_list); 68.70 68.71 -#define MAJOR_XEN(dev) ((dev)>>8) 68.72 -#define MINOR_XEN(dev) ((dev) & 0xff) 68.73 - 68.74 -static struct block_device_operations xlvbd_block_fops = 68.75 +static struct block_device_operations xlvbd_block_fops = 68.76 { 68.77 - .owner = THIS_MODULE, 68.78 - .open = blkif_open, 68.79 - .release = blkif_release, 68.80 - .ioctl = blkif_ioctl, 68.81 + .owner = THIS_MODULE, 68.82 + .open = blkif_open, 68.83 + .release = blkif_release, 68.84 + .ioctl = blkif_ioctl, 68.85 }; 68.86 68.87 spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED; 68.88 68.89 -static struct lvdisk *xlvbd_device_alloc(void) 68.90 -{ 68.91 - struct lvdisk *disk; 68.92 - 68.93 - disk = kmalloc(sizeof(*disk), GFP_KERNEL); 68.94 - if (disk != NULL) { 68.95 - memset(disk, 0, sizeof(*disk)); 68.96 - INIT_LIST_HEAD(&disk->list); 68.97 - } 68.98 - return disk; 68.99 -} 68.100 - 68.101 -static void xlvbd_device_free(struct lvdisk *disk) 68.102 +static struct xlbd_major_info * 68.103 +xlbd_alloc_major_info(int major, int minor, int index) 68.104 { 68.105 - list_del(&disk->list); 68.106 - kfree(disk); 68.107 -} 68.108 + struct xlbd_major_info *ptr; 68.109 68.110 -static struct xlbd_major_info *xlbd_alloc_major_info( 68.111 - int major, int minor, int index) 68.112 -{ 68.113 - struct xlbd_major_info *ptr; 68.114 + ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); 68.115 + if (ptr == NULL) 68.116 + return NULL; 68.117 68.118 - ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); 68.119 - if (ptr == NULL) 68.120 - return NULL; 68.121 + memset(ptr, 0, sizeof(struct xlbd_major_info)); 68.122 68.123 - memset(ptr, 0, sizeof(struct xlbd_major_info)); 68.124 + ptr->major = major; 68.125 68.126 - ptr->major = major; 68.127 + switch (index) { 68.128 + case XLBD_MAJOR_IDE_RANGE: 68.129 + ptr->type = &xlbd_ide_type; 68.130 + ptr->index = index - XLBD_MAJOR_IDE_START; 68.131 + break; 68.132 + case XLBD_MAJOR_SCSI_RANGE: 68.133 + ptr->type = &xlbd_scsi_type; 68.134 + ptr->index = index - XLBD_MAJOR_SCSI_START; 68.135 + break; 68.136 + case XLBD_MAJOR_VBD_RANGE: 68.137 + ptr->type = &xlbd_vbd_type; 68.138 + ptr->index = index - XLBD_MAJOR_VBD_START; 68.139 + break; 68.140 + } 68.141 68.142 - switch (index) { 68.143 - case XLBD_MAJOR_IDE_RANGE: 68.144 - ptr->type = &xlbd_ide_type; 68.145 - ptr->index = index - XLBD_MAJOR_IDE_START; 68.146 - break; 68.147 - case XLBD_MAJOR_SCSI_RANGE: 68.148 - ptr->type = &xlbd_scsi_type; 68.149 - ptr->index = index - XLBD_MAJOR_SCSI_START; 68.150 - break; 68.151 - case XLBD_MAJOR_VBD_RANGE: 68.152 - ptr->type = &xlbd_vbd_type; 68.153 - ptr->index = index - XLBD_MAJOR_VBD_START; 68.154 - break; 68.155 - } 68.156 - 68.157 - printk("Registering block device major %i\n", ptr->major); 68.158 - if (register_blkdev(ptr->major, ptr->type->devname)) { 68.159 - WPRINTK("can't get major %d with name %s\n", 68.160 - ptr->major, ptr->type->devname); 68.161 - kfree(ptr); 68.162 - return NULL; 68.163 - } 68.164 + printk("Registering block device major %i\n", ptr->major); 68.165 + if (register_blkdev(ptr->major, ptr->type->devname)) { 68.166 + WPRINTK("can't get major %d with name %s\n", 68.167 + ptr->major, ptr->type->devname); 68.168 + kfree(ptr); 68.169 + return NULL; 68.170 + } 68.171 68.172 - devfs_mk_dir(ptr->type->devname); 68.173 - major_info[index] = ptr; 68.174 - return ptr; 68.175 + devfs_mk_dir(ptr->type->devname); 68.176 + major_info[index] = ptr; 68.177 + return ptr; 68.178 } 68.179 68.180 -static struct xlbd_major_info 
*xlbd_get_major_info(int device) 68.181 +static struct xlbd_major_info * 68.182 +xlbd_get_major_info(int vdevice) 68.183 { 68.184 - int major, minor, index; 68.185 + struct xlbd_major_info *mi; 68.186 + int major, minor, index; 68.187 68.188 - major = MAJOR_XEN(device); 68.189 - minor = MINOR_XEN(device); 68.190 + major = BLKIF_MAJOR(vdevice); 68.191 + minor = BLKIF_MINOR(vdevice); 68.192 68.193 - switch (major) { 68.194 - case IDE0_MAJOR: index = 0; break; 68.195 - case IDE1_MAJOR: index = 1; break; 68.196 - case IDE2_MAJOR: index = 2; break; 68.197 - case IDE3_MAJOR: index = 3; break; 68.198 - case IDE4_MAJOR: index = 4; break; 68.199 - case IDE5_MAJOR: index = 5; break; 68.200 - case IDE6_MAJOR: index = 6; break; 68.201 - case IDE7_MAJOR: index = 7; break; 68.202 - case IDE8_MAJOR: index = 8; break; 68.203 - case IDE9_MAJOR: index = 9; break; 68.204 - case SCSI_DISK0_MAJOR: index = 10; break; 68.205 - case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: 68.206 - index = 11 + major - SCSI_DISK1_MAJOR; 68.207 - break; 68.208 - case SCSI_CDROM_MAJOR: index = 18; break; 68.209 - default: index = 19; break; 68.210 - } 68.211 + switch (major) { 68.212 + case IDE0_MAJOR: index = 0; break; 68.213 + case IDE1_MAJOR: index = 1; break; 68.214 + case IDE2_MAJOR: index = 2; break; 68.215 + case IDE3_MAJOR: index = 3; break; 68.216 + case IDE4_MAJOR: index = 4; break; 68.217 + case IDE5_MAJOR: index = 5; break; 68.218 + case IDE6_MAJOR: index = 6; break; 68.219 + case IDE7_MAJOR: index = 7; break; 68.220 + case IDE8_MAJOR: index = 8; break; 68.221 + case IDE9_MAJOR: index = 9; break; 68.222 + case SCSI_DISK0_MAJOR: index = 10; break; 68.223 + case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: 68.224 + index = 11 + major - SCSI_DISK1_MAJOR; 68.225 + break; 68.226 + case SCSI_CDROM_MAJOR: index = 18; break; 68.227 + default: index = 19; break; 68.228 + } 68.229 68.230 - return ((major_info[index] != NULL) ? major_info[index] : 68.231 - xlbd_alloc_major_info(major, minor, index)); 68.232 + mi = ((major_info[index] != NULL) ? major_info[index] : 68.233 + xlbd_alloc_major_info(major, minor, index)); 68.234 + mi->usage++; 68.235 + return mi; 68.236 } 68.237 68.238 -static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 68.239 +static void 68.240 +xlbd_put_major_info(struct xlbd_major_info *mi) 68.241 { 68.242 - request_queue_t *rq; 68.243 - 68.244 - rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 68.245 - if (rq == NULL) 68.246 - return -1; 68.247 - 68.248 - elevator_init(rq, "noop"); 68.249 - 68.250 - /* Hard sector size and max sectors impersonate the equiv. hardware. */ 68.251 - blk_queue_hardsect_size(rq, sector_size); 68.252 - blk_queue_max_sectors(rq, 512); 68.253 - 68.254 - /* Each segment in a request is up to an aligned page in size. */ 68.255 - blk_queue_segment_boundary(rq, PAGE_SIZE - 1); 68.256 - blk_queue_max_segment_size(rq, PAGE_SIZE); 68.257 - 68.258 - /* Ensure a merged request will fit in a single I/O ring slot. */ 68.259 - blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); 68.260 - blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); 68.261 - 68.262 - /* Make sure buffer addresses are sector-aligned. 
*/ 68.263 - blk_queue_dma_alignment(rq, 511); 68.264 - 68.265 - gd->queue = rq; 68.266 - 68.267 - return 0; 68.268 + mi->usage--; 68.269 + /* XXX: release major if 0 */ 68.270 } 68.271 68.272 -static struct gendisk *xlvbd_alloc_gendisk( 68.273 - struct xlbd_major_info *mi, int minor, blkif_sector_t capacity, 68.274 - int device, blkif_vdev_t handle, u16 info, u16 sector_size) 68.275 +static int 68.276 +xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 68.277 { 68.278 - struct gendisk *gd; 68.279 - struct xlbd_disk_info *di; 68.280 - int nr_minors = 1; 68.281 + request_queue_t *rq; 68.282 68.283 - di = kmalloc(sizeof(struct xlbd_disk_info), GFP_KERNEL); 68.284 - if (di == NULL) 68.285 - return NULL; 68.286 - memset(di, 0, sizeof(*di)); 68.287 - di->mi = mi; 68.288 - di->xd_device = device; 68.289 - di->handle = handle; 68.290 + rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 68.291 + if (rq == NULL) 68.292 + return -1; 68.293 68.294 - if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) 68.295 - nr_minors = 1 << mi->type->partn_shift; 68.296 + elevator_init(rq, "noop"); 68.297 68.298 - gd = alloc_disk(nr_minors); 68.299 - if (gd == NULL) 68.300 - goto out; 68.301 + /* Hard sector size and max sectors impersonate the equiv. hardware. */ 68.302 + blk_queue_hardsect_size(rq, sector_size); 68.303 + blk_queue_max_sectors(rq, 512); 68.304 68.305 - if (nr_minors > 1) 68.306 - sprintf(gd->disk_name, "%s%c", mi->type->diskname, 68.307 - 'a' + mi->index * mi->type->disks_per_major + 68.308 - (minor >> mi->type->partn_shift)); 68.309 - else 68.310 - sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 68.311 - 'a' + mi->index * mi->type->disks_per_major + 68.312 - (minor >> mi->type->partn_shift), 68.313 - minor & ((1 << mi->type->partn_shift) - 1)); 68.314 - 68.315 - gd->major = mi->major; 68.316 - gd->first_minor = minor; 68.317 - gd->fops = &xlvbd_block_fops; 68.318 - gd->private_data = di; 68.319 - set_capacity(gd, capacity); 68.320 + /* Each segment in a request is up to an aligned page in size. */ 68.321 + blk_queue_segment_boundary(rq, PAGE_SIZE - 1); 68.322 + blk_queue_max_segment_size(rq, PAGE_SIZE); 68.323 68.324 - if (xlvbd_init_blk_queue(gd, sector_size)) { 68.325 - del_gendisk(gd); 68.326 - goto out; 68.327 - } 68.328 - 68.329 - di->rq = gd->queue; 68.330 - 68.331 - if (info & VDISK_READONLY) 68.332 - set_disk_ro(gd, 1); 68.333 + /* Ensure a merged request will fit in a single I/O ring slot. */ 68.334 + blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); 68.335 + blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); 68.336 68.337 - if (info & VDISK_REMOVABLE) 68.338 - gd->flags |= GENHD_FL_REMOVABLE; 68.339 - 68.340 - if (info & VDISK_CDROM) 68.341 - gd->flags |= GENHD_FL_CD; 68.342 + /* Make sure buffer addresses are sector-aligned. 
*/ 68.343 + blk_queue_dma_alignment(rq, 511); 68.344 68.345 - add_disk(gd); 68.346 - 68.347 - return gd; 68.348 + gd->queue = rq; 68.349 68.350 -out: 68.351 - kfree(di); 68.352 - return NULL; 68.353 + return 0; 68.354 } 68.355 68.356 -int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle, 68.357 - u16 info, u16 sector_size) 68.358 +static int 68.359 +xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice, 68.360 + u16 vdisk_info, u16 sector_size, 68.361 + struct blkfront_info *info) 68.362 { 68.363 - struct lvdisk *new; 68.364 - struct block_device *bd; 68.365 - struct gendisk *gd; 68.366 - struct xlbd_major_info *mi; 68.367 + struct gendisk *gd; 68.368 + struct xlbd_major_info *mi; 68.369 + int nr_minors = 1; 68.370 + int err = -ENODEV; 68.371 68.372 - mi = xlbd_get_major_info(device); 68.373 - if (mi == NULL) 68.374 - return -EPERM; 68.375 + mi = xlbd_get_major_info(vdevice); 68.376 + if (mi == NULL) 68.377 + goto out; 68.378 + info->mi = mi; 68.379 + 68.380 + if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) 68.381 + nr_minors = 1 << mi->type->partn_shift; 68.382 + 68.383 + gd = alloc_disk(nr_minors); 68.384 + if (gd == NULL) 68.385 + goto out; 68.386 68.387 - new = xlvbd_device_alloc(); 68.388 - if (new == NULL) 68.389 - return -ENOMEM; 68.390 - new->capacity = capacity; 68.391 - new->info = info; 68.392 - new->handle = handle; 68.393 - new->dev = MKDEV(MAJOR_XEN(device), MINOR_XEN(device)); 68.394 + if (nr_minors > 1) 68.395 + sprintf(gd->disk_name, "%s%c", mi->type->diskname, 68.396 + 'a' + mi->index * mi->type->disks_per_major + 68.397 + (minor >> mi->type->partn_shift)); 68.398 + else 68.399 + sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 68.400 + 'a' + mi->index * mi->type->disks_per_major + 68.401 + (minor >> mi->type->partn_shift), 68.402 + minor & ((1 << mi->type->partn_shift) - 1)); 68.403 + 68.404 + gd->major = mi->major; 68.405 + gd->first_minor = minor; 68.406 + gd->fops = &xlvbd_block_fops; 68.407 + gd->private_data = info; 68.408 + set_capacity(gd, capacity); 68.409 68.410 - bd = bdget(new->dev); 68.411 - if (bd == NULL) 68.412 - goto out; 68.413 - 68.414 - gd = xlvbd_alloc_gendisk(mi, MINOR_XEN(device), capacity, device, handle, 68.415 - info, sector_size); 68.416 - if (gd == NULL) 68.417 - goto out_bd; 68.418 + if (xlvbd_init_blk_queue(gd, sector_size)) { 68.419 + del_gendisk(gd); 68.420 + goto out; 68.421 + } 68.422 + 68.423 + info->rq = gd->queue; 68.424 + 68.425 + if (vdisk_info & VDISK_READONLY) 68.426 + set_disk_ro(gd, 1); 68.427 68.428 - list_add(&new->list, &vbds_list); 68.429 -out_bd: 68.430 - bdput(bd); 68.431 -out: 68.432 - return 0; 68.433 + if (vdisk_info & VDISK_REMOVABLE) 68.434 + gd->flags |= GENHD_FL_REMOVABLE; 68.435 + 68.436 + if (vdisk_info & VDISK_CDROM) 68.437 + gd->flags |= GENHD_FL_CD; 68.438 + 68.439 + add_disk(gd); 68.440 + 68.441 + return 0; 68.442 + 68.443 + out: 68.444 + if (mi) 68.445 + xlbd_put_major_info(mi); 68.446 + return err; 68.447 } 68.448 68.449 -static int xlvbd_device_del(struct lvdisk *disk) 68.450 +int 68.451 +xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, 68.452 + u16 sector_size, struct blkfront_info *info) 68.453 { 68.454 - struct block_device *bd; 68.455 - struct gendisk *gd; 68.456 - struct xlbd_disk_info *di; 68.457 - int ret = 0, unused; 68.458 - request_queue_t *rq; 68.459 + struct block_device *bd; 68.460 + int err = 0; 68.461 68.462 - bd = bdget(disk->dev); 68.463 - if (bd == NULL) 68.464 - return -1; 68.465 - 68.466 - gd = get_gendisk(disk->dev, &unused); 
68.467 - di = gd->private_data; 68.468 + info->dev = MKDEV(BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice)); 68.469 68.470 -#if 0 /* This is wrong: hda and hdb share same major, for example. */ 68.471 - if (di->mi->usage != 0) { 68.472 - WPRINTK("disk removal failed: used [dev=%x]\n", disk->dev); 68.473 - ret = -1; 68.474 - goto out; 68.475 - } 68.476 -#endif 68.477 + bd = bdget(info->dev); 68.478 + if (bd == NULL) 68.479 + return -ENODEV; 68.480 68.481 - rq = gd->queue; 68.482 - del_gendisk(gd); 68.483 - put_disk(gd); 68.484 - blk_cleanup_queue(rq); 68.485 + err = xlvbd_alloc_gendisk(BLKIF_MINOR(vdevice), capacity, vdevice, 68.486 + vdisk_info, sector_size, info); 68.487 68.488 - xlvbd_device_free(disk); 68.489 - bdput(bd); 68.490 - return ret; 68.491 + bdput(bd); 68.492 + return err; 68.493 } 68.494 68.495 -void xlvbd_del(blkif_vdev_t handle) 68.496 +void 68.497 +xlvbd_del(struct blkfront_info *info) 68.498 { 68.499 - struct lvdisk *i; 68.500 + struct block_device *bd; 68.501 + struct gendisk *gd; 68.502 + int unused; 68.503 + request_queue_t *rq; 68.504 + 68.505 + bd = bdget(info->dev); 68.506 + if (bd == NULL) 68.507 + return; 68.508 68.509 - list_for_each_entry(i, &vbds_list, list) { 68.510 - if (i->handle == handle) { 68.511 - xlvbd_device_del(i); 68.512 - return; 68.513 - } 68.514 - } 68.515 - BUG(); 68.516 + gd = get_gendisk(info->dev, &unused); 68.517 + rq = gd->queue; 68.518 + 68.519 + del_gendisk(gd); 68.520 + put_disk(gd); 68.521 + xlbd_put_major_info(info->mi); 68.522 + info->mi = NULL; 68.523 + blk_cleanup_queue(rq); 68.524 + 68.525 + bdput(bd); 68.526 }
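The vbd.c rework above drops the global vbds_list/lvdisk bookkeeping and hangs everything off a caller-owned struct blkfront_info, with major-number accounting split into xlbd_get_major_info()/xlbd_put_major_info(). A minimal sketch of how a frontend connect/disconnect path might drive the new entry points; the numeric values are placeholders and the function names are hypothetical:

    /* Hypothetical caller: the real values arrive in the backend's
     * connect message; these numbers are placeholders. */
    static struct blkfront_info info;

    static int example_connect(void)
    {
        blkif_sector_t capacity = 8 * 1024 * 1024; /* sectors, i.e. 4 GiB */
        int vdevice     = 0x301;                   /* encoded virtual device */
        u16 vdisk_info  = 0;                       /* not RO/removable/CDROM */
        u16 sector_size = 512;

        /* Registers the gendisk and records dev, mi and rq in `info'. */
        return xlvbd_add(capacity, vdevice, vdisk_info, sector_size, &info);
    }

    static void example_disconnect(void)
    {
        /* Tears down the gendisk, queue and major refcount held in `info'. */
        xlvbd_del(&info);
    }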
77.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Aug 23 18:25:51 2005 +0000 77.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Aug 23 18:27:22 2005 +0000 77.3 @@ -102,12 +102,12 @@ dump_packet(int tag, void *addr, u32 ap) 77.4 #endif 77.5 77.6 #ifdef CONFIG_XEN_NETDEV_GRANT_TX 77.7 -static grant_ref_t gref_tx_head, gref_tx_terminal; 77.8 +static grant_ref_t gref_tx_head; 77.9 static grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 77.10 #endif 77.11 77.12 #ifdef CONFIG_XEN_NETDEV_GRANT_RX 77.13 -static grant_ref_t gref_rx_head, gref_rx_terminal; 77.14 +static grant_ref_t gref_rx_head; 77.15 static grant_ref_t grant_rx_ref[NETIF_RX_RING_SIZE + 1]; 77.16 #endif 77.17 77.18 @@ -441,8 +441,8 @@ static void network_alloc_rx_buffers(str 77.19 77.20 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id; 77.21 #ifdef CONFIG_XEN_NETDEV_GRANT_RX 77.22 - if (unlikely((ref = gnttab_claim_grant_reference(&gref_rx_head, 77.23 - gref_rx_terminal)) < 0)) { 77.24 + ref = gnttab_claim_grant_reference(&gref_rx_head); 77.25 + if (unlikely(ref < 0)) { 77.26 printk(KERN_ALERT "#### netfront can't claim rx reference\n"); 77.27 BUG(); 77.28 } 77.29 @@ -537,8 +537,8 @@ static int network_start_xmit(struct sk_ 77.30 77.31 tx->id = id; 77.32 #ifdef CONFIG_XEN_NETDEV_GRANT_TX 77.33 - if (unlikely((ref = gnttab_claim_grant_reference(&gref_tx_head, 77.34 - gref_tx_terminal)) < 0)) { 77.35 + ref = gnttab_claim_grant_reference(&gref_tx_head); 77.36 + if (unlikely(ref < 0)) { 77.37 printk(KERN_ALERT "#### netfront can't claim tx grant reference\n"); 77.38 BUG(); 77.39 } 77.40 @@ -929,8 +929,7 @@ static void send_interface_connect(struc 77.41 msg->handle = np->handle; 77.42 msg->tx_shmem_frame = virt_to_mfn(np->tx); 77.43 #ifdef CONFIG_XEN_NETDEV_GRANT_TX 77.44 - msg->tx_shmem_ref = (u32)gnttab_claim_grant_reference(&gref_tx_head, 77.45 - gref_tx_terminal); 77.46 + msg->tx_shmem_ref = (u32)gnttab_claim_grant_reference(&gref_tx_head); 77.47 if(msg->tx_shmem_ref < 0) { 77.48 printk(KERN_ALERT "#### netfront can't claim tx_shmem reference\n"); 77.49 BUG(); 77.50 @@ -941,8 +940,7 @@ static void send_interface_connect(struc 77.51 77.52 msg->rx_shmem_frame = virt_to_mfn(np->rx); 77.53 #ifdef CONFIG_XEN_NETDEV_GRANT_RX 77.54 - msg->rx_shmem_ref = (u32)gnttab_claim_grant_reference(&gref_rx_head, 77.55 - gref_rx_terminal); 77.56 + msg->rx_shmem_ref = (u32)gnttab_claim_grant_reference(&gref_rx_head); 77.57 if(msg->rx_shmem_ref < 0) { 77.58 printk(KERN_ALERT "#### netfront can't claim rx_shmem reference\n"); 77.59 BUG(); 77.60 @@ -1420,7 +1418,7 @@ static int __init netif_init(void) 77.61 #ifdef CONFIG_XEN_NETDEV_GRANT_TX 77.62 /* A grant for every ring slot, plus one for the ring itself */ 77.63 if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE + 1, 77.64 - &gref_tx_head, &gref_tx_terminal) < 0) { 77.65 + &gref_tx_head) < 0) { 77.66 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); 77.67 return 1; 77.68 } 77.69 @@ -1429,7 +1427,7 @@ static int __init netif_init(void) 77.70 #ifdef CONFIG_XEN_NETDEV_GRANT_RX 77.71 /* A grant for every ring slot, plus one for the ring itself */ 77.72 if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE + 1, 77.73 - &gref_rx_head, &gref_rx_terminal) < 0) { 77.74 + &gref_rx_head) < 0) { 77.75 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); 77.76 return 1; 77.77 } 77.78 @@ -1457,10 +1455,10 @@ static int __init netif_init(void) 77.79 static void netif_exit(void) 77.80 { 77.81 #ifdef CONFIG_XEN_NETDEV_GRANT_TX 77.82 - 
gnttab_free_grant_references(NETIF_TX_RING_SIZE + 1, gref_tx_head); 77.83 + gnttab_free_grant_references(gref_tx_head); 77.84 #endif 77.85 #ifdef CONFIG_XEN_NETDEV_GRANT_RX 77.86 - gnttab_free_grant_references(NETIF_RX_RING_SIZE + 1, gref_rx_head); 77.87 + gnttab_free_grant_references(gref_rx_head); 77.88 #endif 77.89 } 77.90
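With the terminal sentinel gone from the grant-table API, the claim sites above test only for a negative return. A short sketch of the claim/grant/release pattern using the prototypes from the gnttab.h hunk later in this changeset; the helper names are illustrative only:

    static grant_ref_t gref_head;   /* filled by gnttab_alloc_grant_references() */

    static int example_claim_and_grant(domid_t backend, unsigned long frame)
    {
        int ref = gnttab_claim_grant_reference(&gref_head);
        if (ref < 0)
            return ref;   /* pool exhausted: no terminal value to compare against */

        /* Make `frame' readable and writable by the backend domain. */
        gnttab_grant_foreign_access_ref(ref, backend, frame, 0);
        return ref;
    }

    static void example_ungrant(grant_ref_t ref)
    {
        gnttab_end_foreign_access(ref, 0);
        gnttab_release_grant_reference(&gref_head, ref);
    }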
78.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Tue Aug 23 18:25:51 2005 +0000 78.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Tue Aug 23 18:27:22 2005 +0000 78.3 @@ -167,7 +167,7 @@ static int privcmd_ioctl(struct inode *i 78.4 if (ret) 78.5 goto batch_err; 78.6 78.7 - u.val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot); 78.8 + u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot)); 78.9 u.ptr = ptep; 78.10 78.11 if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
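The changeset does not spell out the motivation for the privcmd change, but a likely one is PTE width: pfn_pte_ma()/pte_val_ma() build the pte in its native (possibly 64-bit) type, whereas the old open-coded shift was done in unsigned long arithmetic and would truncate machine addresses above 4 GiB on PAE. A standalone illustration of that truncation with a made-up frame number:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A machine frame just above the 4 GiB boundary (made up). */
        uint32_t mfn = 0x110000;

        uint32_t truncated = mfn << 12;              /* 32-bit shift wraps */
        uint64_t full      = (uint64_t)mfn << 12;    /* what the pte needs */

        printf("truncated=%#x full=%#llx\n",
               truncated, (unsigned long long)full);
        return 0;
    }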
91.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h Tue Aug 23 18:25:51 2005 +0000 91.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h Tue Aug 23 18:27:22 2005 +0000 91.3 @@ -60,9 +60,13 @@ 91.4 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 91.5 91.6 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ 91.7 +#define INVALID_P2M_ENTRY (~0U) 91.8 +#define FOREIGN_FRAME(m) ((m) | 0x80000000U) 91.9 extern unsigned int *phys_to_machine_mapping; 91.10 -#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)])) 91.11 -#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)])) 91.12 +#define pfn_to_mfn(pfn) \ 91.13 +((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL) 91.14 +#define mfn_to_pfn(mfn) \ 91.15 +((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)]) 91.16 91.17 /* Definitions for machine and pseudophysical addresses. */ 91.18 #ifdef CONFIG_X86_PAE
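INVALID_P2M_ENTRY and FOREIGN_FRAME() now live beside the p2m accessors, and pfn_to_mfn() strips the foreign-frame marker (bit 31) when reading an entry. A standalone illustration of the bit handling; the entry value is made up:

    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0U)
    #define FOREIGN_FRAME(m)  ((m) | 0x80000000U)

    int main(void)
    {
        /* A p2m entry deliberately marked as mapping a foreign frame. */
        unsigned int entry = FOREIGN_FRAME(0x12345);

        /* pfn_to_mfn()-style read: the marker bit is masked off. */
        unsigned long mfn = entry & 0x7FFFFFFFUL;

        printf("entry=%#x mfn=%#lx foreign=%d invalid=%d\n",
               entry, mfn,
               (entry & 0x80000000U) != 0,
               entry == INVALID_P2M_ENTRY);
        return 0;
    }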
94.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h Tue Aug 23 18:25:51 2005 +0000 94.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h Tue Aug 23 18:27:22 2005 +0000 94.3 @@ -63,17 +63,15 @@ inline static void set_pte_at_sync(struc 94.4 * 94.5 * NB2. When deliberately mapping foreign pages into the p2m table, you *must* 94.6 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we 94.7 - * require. In all the cases we care about, the high bit gets shifted out 94.8 - * (e.g., phys_to_machine()) so behaviour there is correct. 94.9 + * require. In all the cases we care about, the FOREIGN_FRAME bit is 94.10 + * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 94.11 */ 94.12 -#define INVALID_P2M_ENTRY (~0U) 94.13 -#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1))) 94.14 #define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT) 94.15 #define pte_pfn(_pte) \ 94.16 ({ \ 94.17 unsigned long mfn = pte_mfn(_pte); \ 94.18 unsigned long pfn = mfn_to_pfn(mfn); \ 94.19 - if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \ 94.20 + if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\ 94.21 pfn = max_mapnr; /* special: force !pfn_valid() */ \ 94.22 pfn; \ 94.23 })
95.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h Tue Aug 23 18:25:51 2005 +0000 95.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h Tue Aug 23 18:27:22 2005 +0000 95.3 @@ -150,15 +150,13 @@ static inline int pte_none(pte_t pte) 95.4 return !pte.pte_low && !pte.pte_high; 95.5 } 95.6 95.7 -#define INVALID_P2M_ENTRY (~0U) 95.8 -#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1))) 95.9 #define pte_mfn(_pte) ( ((_pte).pte_low >> PAGE_SHIFT) |\ 95.10 (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)) ) 95.11 #define pte_pfn(_pte) \ 95.12 ({ \ 95.13 unsigned long mfn = pte_mfn(_pte); \ 95.14 unsigned long pfn = mfn_to_pfn(mfn); \ 95.15 - if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \ 95.16 + if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\ 95.17 pfn = max_mapnr; /* special: force !pfn_valid() */ \ 95.18 pfn; \ 95.19 })
100.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h Tue Aug 23 18:25:51 2005 +0000 100.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h Tue Aug 23 18:27:22 2005 +0000 100.3 @@ -62,9 +62,13 @@ void copy_page(void *, void *); 100.4 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 100.5 100.6 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ 100.7 +#define INVALID_P2M_ENTRY (~0U) 100.8 +#define FOREIGN_FRAME(m) ((m) | 0x80000000U) 100.9 extern u32 *phys_to_machine_mapping; 100.10 -#define pfn_to_mfn(_pfn) ((unsigned long) phys_to_machine_mapping[(unsigned int)(_pfn)]) 100.11 -#define mfn_to_pfn(_mfn) ((unsigned long) machine_to_phys_mapping[(unsigned int)(_mfn)]) 100.12 +#define pfn_to_mfn(pfn) \ 100.13 +((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL) 100.14 +#define mfn_to_pfn(mfn) \ 100.15 +((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)]) 100.16 100.17 /* Definitions for machine and pseudophysical addresses. */ 100.18 typedef unsigned long paddr_t;
102.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Tue Aug 23 18:25:51 2005 +0000 102.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Tue Aug 23 18:27:22 2005 +0000 102.3 @@ -300,17 +300,15 @@ inline static void set_pte_at(struct mm_ 102.4 * 102.5 * NB2. When deliberately mapping foreign pages into the p2m table, you *must* 102.6 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we 102.7 - * require. In all the cases we care about, the high bit gets shifted out 102.8 - * (e.g., phys_to_machine()) so behaviour there is correct. 102.9 + * require. In all the cases we care about, the FOREIGN_FRAME bit is 102.10 + * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 102.11 */ 102.12 -#define INVALID_P2M_ENTRY (~0U) 102.13 -#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1))) 102.14 #define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT) 102.15 #define pte_pfn(_pte) \ 102.16 ({ \ 102.17 unsigned long mfn = pte_mfn(_pte); \ 102.18 unsigned pfn = mfn_to_pfn(mfn); \ 102.19 - if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \ 102.20 + if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\ 102.21 pfn = max_mapnr; /* special: force !pfn_valid() */ \ 102.22 pfn; \ 102.23 })
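In all three pgtable variants the pte_pfn() sanity check now compares against the raw phys_to_machine_mapping[] entry rather than pfn_to_mfn(). Since pfn_to_mfn() masks the FOREIGN_FRAME bit, the masked comparison would start accepting deliberately-marked foreign frames; the raw comparison keeps pte_pfn() forcing !pfn_valid() for them, as the NB2 comment requires. A standalone illustration with made-up numbers:

    #include <stdio.h>

    #define FOREIGN_FRAME(m) ((m) | 0x80000000U)
    #define MASKED(m)        ((m) & 0x7FFFFFFFU)   /* what pfn_to_mfn() now returns */

    int main(void)
    {
        unsigned int p2m_entry = FOREIGN_FRAME(0x500); /* pfn marked foreign in the p2m */
        unsigned long pte_mfn  = 0x500;                /* the mfn found in the pte */

        /* Masked comparison would wrongly validate the foreign mapping... */
        printf("masked comparison matches: %d\n", MASKED(p2m_entry) == pte_mfn);

        /* ...the raw comparison keeps it invalid, as intended. */
        printf("raw    comparison matches: %d\n", p2m_entry == pte_mfn);
        return 0;
    }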
104.1 --- a/linux-2.6-xen-sparse/include/asm-xen/gnttab.h Tue Aug 23 18:25:51 2005 +0000 104.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/gnttab.h Tue Aug 23 18:27:22 2005 +0000 104.3 @@ -19,54 +19,46 @@ 104.4 104.5 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ 104.6 #define NR_GRANT_FRAMES 4 104.7 -#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t)) 104.8 104.9 -int 104.10 -gnttab_grant_foreign_access( 104.11 - domid_t domid, unsigned long frame, int readonly); 104.12 - 104.13 -void 104.14 -gnttab_end_foreign_access( 104.15 - grant_ref_t ref, int readonly); 104.16 +struct gnttab_free_callback { 104.17 + struct gnttab_free_callback *next; 104.18 + void (*fn)(void *); 104.19 + void *arg; 104.20 + u16 count; 104.21 +}; 104.22 104.23 -int 104.24 -gnttab_grant_foreign_transfer( 104.25 - domid_t domid, unsigned long pfn); 104.26 +int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, 104.27 + int readonly); 104.28 + 104.29 +void gnttab_end_foreign_access(grant_ref_t ref, int readonly); 104.30 104.31 -unsigned long 104.32 -gnttab_end_foreign_transfer( 104.33 - grant_ref_t ref); 104.34 +int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); 104.35 104.36 -int 104.37 -gnttab_query_foreign_access( 104.38 - grant_ref_t ref ); 104.39 +unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); 104.40 + 104.41 +int gnttab_query_foreign_access(grant_ref_t ref); 104.42 104.43 /* 104.44 * operations on reserved batches of grant references 104.45 */ 104.46 -int 104.47 -gnttab_alloc_grant_references( 104.48 - u16 count, grant_ref_t *pprivate_head, grant_ref_t *private_terminal ); 104.49 +int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); 104.50 + 104.51 +void gnttab_free_grant_reference(grant_ref_t ref); 104.52 104.53 -void 104.54 -gnttab_free_grant_references( 104.55 - u16 count, grant_ref_t private_head ); 104.56 +void gnttab_free_grant_references(grant_ref_t head); 104.57 104.58 -int 104.59 -gnttab_claim_grant_reference( grant_ref_t *pprivate_head, grant_ref_t terminal 104.60 -); 104.61 +int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); 104.62 104.63 -void 104.64 -gnttab_release_grant_reference( 104.65 - grant_ref_t *private_head, grant_ref_t release ); 104.66 +void gnttab_release_grant_reference(grant_ref_t *private_head, 104.67 + grant_ref_t release); 104.68 + 104.69 +void gnttab_request_free_callback(struct gnttab_free_callback *callback, 104.70 + void (*fn)(void *), void *arg, u16 count); 104.71 104.72 -void 104.73 -gnttab_grant_foreign_access_ref( 104.74 - grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); 104.75 +void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, 104.76 + unsigned long frame, int readonly); 104.77 104.78 -void 104.79 -gnttab_grant_foreign_transfer_ref( 104.80 - grant_ref_t, domid_t domid, unsigned long pfn); 104.81 - 104.82 +void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, 104.83 + unsigned long pfn); 104.84 104.85 #endif /* __ASM_GNTTAB_H__ */
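Besides flattening the prototypes into a conventional one-line style, the header gains struct gnttab_free_callback so a driver that runs out of references can ask to be notified when some are freed. The header alone does not say when or in what context the callback runs, so the sketch below only shows the shape of the API; the kick function is hypothetical:

    static grant_ref_t gref_head;
    static struct gnttab_free_callback free_cb;

    static void example_kick(void *arg)
    {
        /* e.g. reschedule the transmit path now that references exist again */
    }

    static int example_claim(void)
    {
        int ref = gnttab_claim_grant_reference(&gref_head);
        if (ref < 0) {
            /* Ask to be called back once at least one reference is free. */
            gnttab_request_free_callback(&free_cb, example_kick, NULL, 1);
        }
        return ref;
    }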
172.1 --- a/tools/xenstat/xentop/Makefile Tue Aug 23 18:25:51 2005 +0000 172.2 +++ b/tools/xenstat/xentop/Makefile Tue Aug 23 18:27:22 2005 +0000 172.3 @@ -28,7 +28,7 @@ sbindir=$(prefix)/sbin 172.4 172.5 CFLAGS += -DGCC_PRINTF -Wall -Werror -I$(XEN_LIBXENSTAT) 172.6 LDFLAGS += -L$(XEN_LIBXENSTAT) 172.7 -LDLIBS += -lxenstat -lcurses 172.8 +LDLIBS += -lxenstat -lncurses 172.9 172.10 all: xentop 172.11
185.1 --- a/xen/arch/x86/io_apic.c Tue Aug 23 18:25:51 2005 +0000 185.2 +++ b/xen/arch/x86/io_apic.c Tue Aug 23 18:27:22 2005 +0000 185.3 @@ -1751,8 +1751,30 @@ int ioapic_guest_write(int apicid, int a 185.4 185.5 pin = (address - 0x10) >> 1; 185.6 185.7 + *(u32 *)&rte = val; 185.8 rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); 185.9 - *(int *)&rte = val; 185.10 + 185.11 + /* 185.12 + * What about weird destination types? 185.13 + * SMI: Ignore? Ought to be set up by the BIOS. 185.14 + * NMI: Ignore? Watchdog functionality is Xen's concern. 185.15 + * INIT: Definitely ignore: probably a guest OS bug. 185.16 + * ExtINT: Ignore? Linux only asserts this at start of day. 185.17 + * For now, print a message and return an error. We can fix up on demand. 185.18 + */ 185.19 + if ( rte.delivery_mode > dest_LowestPrio ) 185.20 + { 185.21 + printk("ERROR: Attempt to write weird IOAPIC destination mode!\n"); 185.22 + printk(" APIC=%d/%d, lo-reg=%x\n", apicid, pin, val); 185.23 + return -EINVAL; 185.24 + } 185.25 + 185.26 + /* 185.27 + * The guest does not know physical APIC arrangement (flat vs. cluster). 185.28 + * Apply genapic conventions for this platform. 185.29 + */ 185.30 + rte.delivery_mode = INT_DELIVERY_MODE; 185.31 + rte.dest_mode = INT_DEST_MODE; 185.32 185.33 if ( rte.vector >= FIRST_DEVICE_VECTOR ) 185.34 {
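The new guard only lets fixed and lowest-priority delivery modes through before Xen overrides them with its genapic settings; SMI, NMI, INIT and ExtINT writes are rejected with a message. For reference, the fields being tested sit in the low RTE dword. A standalone decode of a hypothetical guest write:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x000004d9;   /* hypothetical lo-reg value written by a guest */

        unsigned int vector        = val & 0xff;        /* bits 0-7   */
        unsigned int delivery_mode = (val >> 8) & 0x7;  /* bits 8-10: 0=Fixed 1=LowestPrio
                                                           2=SMI 4=NMI 5=INIT 7=ExtINT */
        unsigned int dest_mode     = (val >> 11) & 0x1; /* bit 11: 0=physical 1=logical */

        /* Mirrors the new check: only Fixed (0) and LowestPrio (1) are accepted. */
        printf("vector=%#x delivery=%u dest_mode=%u accepted=%d\n",
               vector, delivery_mode, dest_mode, delivery_mode <= 1);
        return 0;
    }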
186.1 --- a/xen/arch/x86/mm.c Tue Aug 23 18:25:51 2005 +0000 186.2 +++ b/xen/arch/x86/mm.c Tue Aug 23 18:27:22 2005 +0000 186.3 @@ -444,7 +444,7 @@ get_page_from_l1e( 186.4 186.5 if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) ) 186.6 { 186.7 - MEM_LOG("Bad L1 flags %x\n", l1e_get_flags(l1e) & L1_DISALLOW_MASK); 186.8 + MEM_LOG("Bad L1 flags %x", l1e_get_flags(l1e) & L1_DISALLOW_MASK); 186.9 return 0; 186.10 } 186.11 186.12 @@ -490,7 +490,7 @@ get_page_from_l2e( 186.13 186.14 if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) ) 186.15 { 186.16 - MEM_LOG("Bad L2 flags %x\n", l2e_get_flags(l2e) & L2_DISALLOW_MASK); 186.17 + MEM_LOG("Bad L2 flags %x", l2e_get_flags(l2e) & L2_DISALLOW_MASK); 186.18 return 0; 186.19 } 186.20 186.21 @@ -523,7 +523,7 @@ get_page_from_l3e( 186.22 186.23 if ( unlikely((l3e_get_flags(l3e) & L3_DISALLOW_MASK)) ) 186.24 { 186.25 - MEM_LOG("Bad L3 flags %x\n", l3e_get_flags(l3e) & L3_DISALLOW_MASK); 186.26 + MEM_LOG("Bad L3 flags %x", l3e_get_flags(l3e) & L3_DISALLOW_MASK); 186.27 return 0; 186.28 } 186.29 186.30 @@ -557,7 +557,7 @@ get_page_from_l4e( 186.31 186.32 if ( unlikely((l4e_get_flags(l4e) & L4_DISALLOW_MASK)) ) 186.33 { 186.34 - MEM_LOG("Bad L4 flags %x\n", l4e_get_flags(l4e) & L4_DISALLOW_MASK); 186.35 + MEM_LOG("Bad L4 flags %x", l4e_get_flags(l4e) & L4_DISALLOW_MASK); 186.36 return 0; 186.37 } 186.38 186.39 @@ -1025,7 +1025,7 @@ static inline int update_l1e(l1_pgentry_ 186.40 unlikely(o != l1e_get_intpte(ol1e)) ) 186.41 { 186.42 MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte 186.43 - ": saw %" PRIpte "\n", 186.44 + ": saw %" PRIpte, 186.45 l1e_get_intpte(ol1e), 186.46 l1e_get_intpte(nl1e), 186.47 o); 186.48 @@ -1051,7 +1051,7 @@ static int mod_l1_entry(l1_pgentry_t *pl 186.49 { 186.50 if ( unlikely(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) ) 186.51 { 186.52 - MEM_LOG("Bad L1 flags %x\n", 186.53 + MEM_LOG("Bad L1 flags %x", 186.54 l1e_get_flags(nl1e) & L1_DISALLOW_MASK); 186.55 return 0; 186.56 } 186.57 @@ -1113,7 +1113,7 @@ static int mod_l2_entry(l2_pgentry_t *pl 186.58 { 186.59 if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) ) 186.60 { 186.61 - MEM_LOG("Bad L2 flags %x\n", 186.62 + MEM_LOG("Bad L2 flags %x", 186.63 l2e_get_flags(nl2e) & L2_DISALLOW_MASK); 186.64 return 0; 186.65 } 186.66 @@ -1175,7 +1175,7 @@ static int mod_l3_entry(l3_pgentry_t *pl 186.67 { 186.68 if ( unlikely(l3e_get_flags(nl3e) & L3_DISALLOW_MASK) ) 186.69 { 186.70 - MEM_LOG("Bad L3 flags %x\n", 186.71 + MEM_LOG("Bad L3 flags %x", 186.72 l3e_get_flags(nl3e) & L3_DISALLOW_MASK); 186.73 return 0; 186.74 } 186.75 @@ -1237,7 +1237,7 @@ static int mod_l4_entry(l4_pgentry_t *pl 186.76 { 186.77 if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) ) 186.78 { 186.79 - MEM_LOG("Bad L4 flags %x\n", 186.80 + MEM_LOG("Bad L4 flags %x", 186.81 l4e_get_flags(nl4e) & L4_DISALLOW_MASK); 186.82 return 0; 186.83 } 186.84 @@ -1598,7 +1598,7 @@ static int set_foreigndom(unsigned int c 186.85 percpu_info[cpu].foreign = dom_io; 186.86 break; 186.87 default: 186.88 - MEM_LOG("Dom %u cannot set foreign dom\n", d->domain_id); 186.89 + MEM_LOG("Dom %u cannot set foreign dom", d->domain_id); 186.90 okay = 0; 186.91 break; 186.92 } 186.93 @@ -1831,7 +1831,7 @@ int do_mmuext_op( 186.94 case MMUEXT_FLUSH_CACHE: 186.95 if ( unlikely(!IS_CAPABLE_PHYSDEV(d)) ) 186.96 { 186.97 - MEM_LOG("Non-physdev domain tried to FLUSH_CACHE.\n"); 186.98 + MEM_LOG("Non-physdev domain tried to FLUSH_CACHE."); 186.99 okay = 0; 186.100 } 186.101 else 186.102 @@ -1845,7 +1845,7 @@ int do_mmuext_op( 186.103 if ( 
shadow_mode_external(d) ) 186.104 { 186.105 MEM_LOG("ignoring SET_LDT hypercall from external " 186.106 - "domain %u\n", d->domain_id); 186.107 + "domain %u", d->domain_id); 186.108 okay = 0; 186.109 break; 186.110 } 186.111 @@ -1916,7 +1916,7 @@ int do_mmuext_op( 186.112 unlikely(IS_XEN_HEAP_FRAME(page)) ) 186.113 { 186.114 MEM_LOG("Transferee has no reservation headroom (%d,%d), or " 186.115 - "page is in Xen heap (%lx), or dom is dying (%ld).\n", 186.116 + "page is in Xen heap (%lx), or dom is dying (%ld).", 186.117 e->tot_pages, e->max_pages, op.mfn, e->domain_flags); 186.118 okay = 0; 186.119 goto reassign_fail; 186.120 @@ -1937,7 +1937,7 @@ int do_mmuext_op( 186.121 unlikely(_nd != _d) ) 186.122 { 186.123 MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p," 186.124 - " caf=%08x, taf=%" PRtype_info "\n", 186.125 + " caf=%08x, taf=%" PRtype_info, 186.126 page_to_pfn(page), d, d->domain_id, 186.127 unpickle_domptr(_nd), x, page->u.inuse.type_info); 186.128 okay = 0; 186.129 @@ -2301,7 +2301,7 @@ int update_grant_pte_mapping( 186.130 if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) || 186.131 !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) ) 186.132 { 186.133 - DPRINTK("Grant map attempted to update a non-L1 page\n"); 186.134 + MEM_LOG("Grant map attempted to update a non-L1 page"); 186.135 rc = GNTST_general_error; 186.136 goto failed; 186.137 } 186.138 @@ -2363,7 +2363,7 @@ int clear_grant_pte_mapping( 186.139 if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) || 186.140 !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) ) 186.141 { 186.142 - DPRINTK("Grant map attempted to update a non-L1 page\n"); 186.143 + MEM_LOG("Grant map attempted to update a non-L1 page"); 186.144 rc = GNTST_general_error; 186.145 goto failed; 186.146 } 186.147 @@ -2378,7 +2378,7 @@ int clear_grant_pte_mapping( 186.148 /* Check that the virtual address supplied is actually mapped to frame. */ 186.149 if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) ) 186.150 { 186.151 - DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n", 186.152 + MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx", 186.153 (unsigned long)l1e_get_intpte(ol1e), addr, frame); 186.154 put_page_type(page); 186.155 rc = GNTST_general_error; 186.156 @@ -2388,7 +2388,7 @@ int clear_grant_pte_mapping( 186.157 /* Delete pagetable entry. */ 186.158 if ( unlikely(__put_user(0, (intpte_t *)va))) 186.159 { 186.160 - DPRINTK("Cannot delete PTE entry at %p.\n", va); 186.161 + MEM_LOG("Cannot delete PTE entry at %p", va); 186.162 put_page_type(page); 186.163 rc = GNTST_general_error; 186.164 goto failed; 186.165 @@ -2452,7 +2452,7 @@ int clear_grant_va_mapping(unsigned long 186.166 186.167 if ( unlikely(__get_user(ol1e.l1, &pl1e->l1) != 0) ) 186.168 { 186.169 - DPRINTK("Could not find PTE entry for address %lx\n", addr); 186.170 + MEM_LOG("Could not find PTE entry for address %lx", addr); 186.171 return GNTST_general_error; 186.172 } 186.173 186.174 @@ -2462,7 +2462,7 @@ int clear_grant_va_mapping(unsigned long 186.175 */ 186.176 if ( unlikely(l1e_get_pfn(ol1e) != frame) ) 186.177 { 186.178 - DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n", 186.179 + MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx", 186.180 l1e_get_pfn(ol1e), addr, frame); 186.181 return GNTST_general_error; 186.182 } 186.183 @@ -2470,7 +2470,7 @@ int clear_grant_va_mapping(unsigned long 186.184 /* Delete pagetable entry. 
*/ 186.185 if ( unlikely(__put_user(0, &pl1e->l1)) ) 186.186 { 186.187 - DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e); 186.188 + MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e); 186.189 return GNTST_general_error; 186.190 } 186.191 186.192 @@ -2930,7 +2930,7 @@ int revalidate_l1( 186.193 186.194 if ( unlikely(!get_page_from_l1e(nl1e, d)) ) 186.195 { 186.196 - MEM_LOG("ptwr: Could not re-validate l1 page\n"); 186.197 + MEM_LOG("ptwr: Could not re-validate l1 page"); 186.198 /* 186.199 * Make the remaining p.t's consistent before crashing, so the 186.200 * reference counts are correct. 186.201 @@ -3056,7 +3056,7 @@ static int ptwr_emulated_update( 186.202 /* Aligned access only, thank you. */ 186.203 if ( !access_ok(addr, bytes) || ((addr & (bytes-1)) != 0) ) 186.204 { 186.205 - MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %lx)\n", 186.206 + MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %lx)", 186.207 bytes, addr); 186.208 return X86EMUL_UNHANDLEABLE; 186.209 } 186.210 @@ -3089,7 +3089,7 @@ static int ptwr_emulated_update( 186.211 if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)], 186.212 sizeof(pte))) 186.213 { 186.214 - MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table\n"); 186.215 + MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table"); 186.216 return X86EMUL_UNHANDLEABLE; 186.217 } 186.218 186.219 @@ -3102,7 +3102,7 @@ static int ptwr_emulated_update( 186.220 (page_get_owner(page) != d) ) 186.221 { 186.222 MEM_LOG("ptwr_emulate: Page is mistyped or bad pte " 186.223 - "(%lx, %" PRtype_info ")\n", 186.224 + "(%lx, %" PRtype_info ")", 186.225 l1e_get_pfn(pte), page->u.inuse.type_info); 186.226 return X86EMUL_UNHANDLEABLE; 186.227 }
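Most of the mm.c hunk simply drops trailing newlines from MEM_LOG() format strings and upgrades several DPRINTKs to MEM_LOG(). That is only consistent if the macro terminates the line itself; a hedged sketch of such a macro is below (the real Xen definition may differ in the prefix it prints):

    /* Sketch only: a logging macro that adds its own context and final
     * newline, so callers that embed "\n" would emit blank lines. */
    #define MEM_LOG(fmt, args...)                                       \
        printk("DOM%u: (file=%s, line=%d) " fmt "\n",                   \
               current->domain->domain_id, __FILE__, __LINE__, ## args)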
193.1 --- a/xen/arch/x86/vmx.c Tue Aug 23 18:25:51 2005 +0000 193.2 +++ b/xen/arch/x86/vmx.c Tue Aug 23 18:27:22 2005 +0000 193.3 @@ -1712,9 +1712,6 @@ asmlinkage void vmx_vmexit_handler(struc 193.4 default: 193.5 __vmx_bug(®s); /* should not happen */ 193.6 } 193.7 - 193.8 - vmx_intr_assist(v); 193.9 - return; 193.10 } 193.11 193.12 asmlinkage void load_cr2(void)
194.1 --- a/xen/arch/x86/vmx_io.c Tue Aug 23 18:25:51 2005 +0000 194.2 +++ b/xen/arch/x86/vmx_io.c Tue Aug 23 18:27:22 2005 +0000 194.3 @@ -631,12 +631,14 @@ static inline int irq_masked(unsigned lo 194.4 return ((eflags & X86_EFLAGS_IF) == 0); 194.5 } 194.6 194.7 -void vmx_intr_assist(struct vcpu *v) 194.8 +asmlinkage void vmx_intr_assist(void) 194.9 { 194.10 int intr_type = 0; 194.11 - int highest_vector = find_highest_pending_irq(v, &intr_type); 194.12 + int highest_vector; 194.13 unsigned long intr_fields, eflags, interruptibility, cpu_exec_control; 194.14 + struct vcpu *v = current; 194.15 194.16 + highest_vector = find_highest_pending_irq(v, &intr_type); 194.17 __vmread(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control); 194.18 194.19 if (highest_vector == -1) { 194.20 @@ -712,9 +714,6 @@ void vmx_do_resume(struct vcpu *d) 194.21 194.22 /* We can't resume the guest if we're waiting on I/O */ 194.23 ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)); 194.24 - 194.25 - /* We always check for interrupts before resuming guest */ 194.26 - vmx_intr_assist(d); 194.27 } 194.28 194.29 #endif /* CONFIG_VMX */
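vmx_intr_assist() loses its vcpu argument because it is no longer called from the C exit paths (the vmx.c and vmx_io.c deletions above) but directly from the VMX restore sequence in entry.S, per the hunks below; it therefore derives the vcpu from current. A sketch of that asmlinkage pattern, not the real body:

    /* Sketch: a helper invoked straight from the assembly restore path
     * takes no arguments and picks up its context from `current'. */
    asmlinkage void example_intr_assist(void)
    {
        struct vcpu *v = current;
        int intr_type = 0;
        int vector = find_highest_pending_irq(v, &intr_type);

        if (vector == -1)
            return;                 /* nothing pending to inject */

        /* ...program the VMCS entry-interruption fields for `vector'... */
    }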
195.1 --- a/xen/arch/x86/x86_32/entry.S Tue Aug 23 18:25:51 2005 +0000 195.2 +++ b/xen/arch/x86/x86_32/entry.S Tue Aug 23 18:27:22 2005 +0000 195.3 @@ -140,6 +140,7 @@ 1: 195.4 jnz 2f 195.5 195.6 /* vmx_restore_all_guest */ 195.7 + call vmx_intr_assist 195.8 call load_cr2 195.9 .endif 195.10 VMX_RESTORE_ALL_NOSEGREGS
196.1 --- a/xen/arch/x86/x86_32/traps.c Tue Aug 23 18:25:51 2005 +0000 196.2 +++ b/xen/arch/x86/x86_32/traps.c Tue Aug 23 18:27:22 2005 +0000 196.3 @@ -1,5 +1,6 @@ 196.4 196.5 #include <xen/config.h> 196.6 +#include <xen/domain_page.h> 196.7 #include <xen/init.h> 196.8 #include <xen/sched.h> 196.9 #include <xen/lib.h> 196.10 @@ -86,24 +87,33 @@ void show_registers(struct cpu_user_regs 196.11 196.12 void show_page_walk(unsigned long addr) 196.13 { 196.14 - l2_pgentry_t pmd; 196.15 - l1_pgentry_t *pte; 196.16 - 196.17 - if ( addr < PAGE_OFFSET ) 196.18 - return; 196.19 + unsigned long pfn = read_cr3() >> PAGE_SHIFT; 196.20 + intpte_t *ptab, ent; 196.21 196.22 printk("Pagetable walk from %08lx:\n", addr); 196.23 - 196.24 - pmd = idle_pg_table_l2[l2_linear_offset(addr)]; 196.25 - printk(" L2 = %"PRIpte" %s\n", l2e_get_intpte(pmd), 196.26 - (l2e_get_flags(pmd) & _PAGE_PSE) ? "(2/4MB)" : ""); 196.27 - if ( !(l2e_get_flags(pmd) & _PAGE_PRESENT) || 196.28 - (l2e_get_flags(pmd) & _PAGE_PSE) ) 196.29 + 196.30 +#ifdef CONFIG_X86_PAE 196.31 + ptab = map_domain_page(pfn); 196.32 + ent = ptab[l3_table_offset(addr)]; 196.33 + printk(" L3 = %"PRIpte"\n", ent); 196.34 + unmap_domain_page(ptab); 196.35 + if ( !(ent & _PAGE_PRESENT) ) 196.36 return; 196.37 + pfn = ent >> PAGE_SHIFT; 196.38 +#endif 196.39 196.40 - pte = __va(l2e_get_paddr(pmd)); 196.41 - pte += l1_table_offset(addr); 196.42 - printk(" L1 = %"PRIpte"\n", l1e_get_intpte(*pte)); 196.43 + ptab = map_domain_page(pfn); 196.44 + ent = ptab[l2_table_offset(addr)]; 196.45 + printk(" L2 = %"PRIpte" %s\n", ent, (ent & _PAGE_PSE) ? "(PSE)" : ""); 196.46 + unmap_domain_page(ptab); 196.47 + if ( !(ent & _PAGE_PRESENT) || (ent & _PAGE_PSE) ) 196.48 + return; 196.49 + pfn = ent >> PAGE_SHIFT; 196.50 + 196.51 + ptab = map_domain_page(ent >> PAGE_SHIFT); 196.52 + ent = ptab[l2_table_offset(addr)]; 196.53 + printk(" L1 = %"PRIpte"\n", ent); 196.54 + unmap_domain_page(ptab); 196.55 } 196.56 196.57 #define DOUBLEFAULT_STACK_SIZE 1024
197.1 --- a/xen/arch/x86/x86_64/entry.S Tue Aug 23 18:25:51 2005 +0000 197.2 +++ b/xen/arch/x86/x86_64/entry.S Tue Aug 23 18:27:22 2005 +0000 197.3 @@ -233,6 +233,7 @@ 1: 197.4 jnz 2f 197.5 197.6 /* vmx_restore_all_guest */ 197.7 + call vmx_intr_assist 197.8 call load_cr2 197.9 .endif 197.10 /*
212.1 --- a/xen/include/asm-x86/vmx.h Tue Aug 23 18:25:51 2005 +0000 212.2 +++ b/xen/include/asm-x86/vmx.h Tue Aug 23 18:27:22 2005 +0000 212.3 @@ -31,7 +31,7 @@ 212.4 extern void vmx_asm_vmexit_handler(struct cpu_user_regs); 212.5 extern void vmx_asm_do_resume(void); 212.6 extern void vmx_asm_do_launch(void); 212.7 -extern void vmx_intr_assist(struct vcpu *d); 212.8 +extern void vmx_intr_assist(void); 212.9 212.10 extern void arch_vmx_do_launch(struct vcpu *); 212.11 extern void arch_vmx_do_resume(struct vcpu *); 212.12 @@ -355,7 +355,7 @@ static inline int __vmxon (u64 addr) 212.13 } 212.14 212.15 /* Make sure that xen intercepts any FP accesses from current */ 212.16 -static inline void vmx_stts() 212.17 +static inline void vmx_stts(void) 212.18 { 212.19 unsigned long cr0; 212.20
221.1 --- a/xen/include/public/io/blkif.h Tue Aug 23 18:25:51 2005 +0000 221.2 +++ b/xen/include/public/io/blkif.h Tue Aug 23 18:27:22 2005 +0000 221.3 @@ -58,6 +58,9 @@ typedef struct blkif_response { 221.4 #define BLKIF_RSP_ERROR -1 /* non-specific 'error' */ 221.5 #define BLKIF_RSP_OKAY 0 /* non-specific 'okay' */ 221.6 221.7 +#define BLKIF_MAJOR(dev) ((dev)>>8) 221.8 +#define BLKIF_MINOR(dev) ((dev) & 0xff) 221.9 + 221.10 /* 221.11 * Generate blkif ring structures and types. 221.12 */
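These two macros are what the reworked blkfront vbd.c uses to split the backend-supplied virtual device number into a Linux major/minor pair before calling MKDEV(). A standalone check of the encoding; the device number is a made-up example:

    #include <stdio.h>

    #define BLKIF_MAJOR(dev) ((dev) >> 8)
    #define BLKIF_MINOR(dev) ((dev) & 0xff)

    int main(void)
    {
        int vdevice = 0x0341;   /* example only: major 3, minor 0x41 */
        printf("major=%d minor=%d\n",
               BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
        return 0;
    }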